Safetensors
rt_detr_v2

train-DLNv1_DLNv2_WS2013_NoTables

#1
Files changed (4) hide show
  1. README.md +112 -3
  2. config.json +155 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +26 -0
README.md CHANGED
@@ -1,3 +1,112 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+
5
+ THIS IS WORK IN PROGRESS
6
+
7
+
8
+ # Docling Layout Model
9
+
10
+ `docling-layout-heron-101` is a Document Layout Model based on [RT-DETRv2](https://github.com/lyuwenyu/RT-DETR/tree/main/rtdetrv2_pytorch) with `ResNet-101` backbone.
11
+
12
+ The model has been trained from scratch on a variety of document datasets.
13
+
14
+ It is part of the [Docling project](https://github.com/docling-project/docling).
15
+
16
+
17
+ # Inference code example
18
+
19
+ Prerequisites:
20
+
21
+ ```bash
22
+ pip install transformers Pillow torch requests
23
+ ```
24
+
25
+ Prediction:
26
+
27
+ ```python
28
+ import requests
29
+ from transformers import RTDetrV2ForObjectDetection, RTDetrImageProcessor
30
+ import torch
31
+ from PIL import Image
32
+
33
+
34
+ classes_map = {
35
+ 0: "Caption",
36
+ 1: "Footnote",
37
+ 2: "Formula",
38
+ 3: "List-item",
39
+ 4: "Page-footer",
40
+ 5: "Page-header",
41
+ 6: "Picture",
42
+ 7: "Section-header",
43
+ 8: "Table",
44
+ 9: "Text",
45
+ 10: "Title",
46
+ 11: "Document Index",
47
+ 12: "Code",
48
+ 13: "Checkbox-Selected",
49
+ 14: "Checkbox-Unselected",
50
+ 15: "Form",
51
+ 16: "Key-Value Region",
52
+ }
53
+ image_url = "https://huggingface.co/spaces/ds4sd/SmolDocling-256M-Demo/resolve/main/example_images/annual_rep_14.png"
54
+ model_name = "ds4sd/docling-layout-heron-101"
55
+ threshold = 0.6
56
+
57
+
58
+ # Download the image
59
+ image = Image.open(requests.get(image_url, stream=True).raw)
60
+ image = image.convert("RGB")
61
+
62
+ # Initialize the model
63
+ image_processor = RTDetrImageProcessor.from_pretrained(model_name)
64
+ model = RTDetrV2ForObjectDetection.from_pretrained(model_name)
65
+
66
+ # Run the prediction pipeline
67
+ inputs = image_processor(images=[image], return_tensors="pt")
68
+ with torch.no_grad():
69
+ outputs = model(**inputs)
70
+ results = image_processor.post_process_object_detection(
71
+ outputs,
72
+ target_sizes=torch.tensor([image.size[::-1]]),
73
+ threshold=threshold,
74
+ )
75
+
76
+ # Get the results
77
+ for result in results:
78
+ for score, label_id, box in zip(
79
+ result["scores"], result["labels"], result["boxes"]
80
+ ):
81
+ score = round(score.item(), 2)
82
+ label = classes_map[label_id.item()]
83
+ box = [round(i, 2) for i in box.tolist()]
84
+ print(f"{label}:{score} {box}")
85
+ ```
86
+
87
+
88
+ # References
89
+
90
+ ```
91
+ @techreport{Docling,
92
+ author = {Deep Search Team},
93
+ month = {8},
94
+ title = {Docling Technical Report},
95
+ url = {https://arxiv.org/abs/2408.09869v4},
96
+ eprint = {2408.09869},
97
+ doi = {10.48550/arXiv.2408.09869},
98
+ version = {1.0.0},
99
+ year = {2024}
100
+ }
101
+
102
+ @misc{lv2024rtdetrv2improvedbaselinebagoffreebies,
103
+ title={RT-DETRv2: Improved Baseline with Bag-of-Freebies for Real-Time Detection Transformer},
104
+ author={Wenyu Lv and Yian Zhao and Qinyao Chang and Kui Huang and Guanzhong Wang and Yi Liu},
105
+ year={2024},
106
+ eprint={2407.17140},
107
+ archivePrefix={arXiv},
108
+ primaryClass={cs.CV},
109
+ url={https://arxiv.org/abs/2407.17140},
110
+ }
111
+
112
+ ```
config.json ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_dropout": 0.0,
3
+ "activation_function": "silu",
4
+ "anchor_image_size": null,
5
+ "architectures": [
6
+ "RTDetrV2ForObjectDetection"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "auxiliary_loss": true,
10
+ "backbone": null,
11
+ "backbone_config": {
12
+ "depths": [
13
+ 3,
14
+ 4,
15
+ 23,
16
+ 3
17
+ ],
18
+ "downsample_in_bottleneck": false,
19
+ "downsample_in_first_stage": false,
20
+ "embedding_size": 64,
21
+ "hidden_act": "relu",
22
+ "hidden_sizes": [
23
+ 256,
24
+ 512,
25
+ 1024,
26
+ 2048
27
+ ],
28
+ "layer_type": "bottleneck",
29
+ "model_type": "rt_detr_resnet",
30
+ "num_channels": 3,
31
+ "out_features": [
32
+ "stage2",
33
+ "stage3",
34
+ "stage4"
35
+ ],
36
+ "out_indices": [
37
+ 2,
38
+ 3,
39
+ 4
40
+ ],
41
+ "stage_names": [
42
+ "stem",
43
+ "stage1",
44
+ "stage2",
45
+ "stage3",
46
+ "stage4"
47
+ ]
48
+ },
49
+ "backbone_kwargs": null,
50
+ "batch_norm_eps": 1e-05,
51
+ "box_noise_scale": 1.0,
52
+ "d_model": 256,
53
+ "decoder_activation_function": "relu",
54
+ "decoder_attention_heads": 8,
55
+ "decoder_ffn_dim": 1024,
56
+ "decoder_in_channels": [
57
+ 384,
58
+ 384,
59
+ 384
60
+ ],
61
+ "decoder_layers": 6,
62
+ "decoder_method": "default",
63
+ "decoder_n_levels": 3,
64
+ "decoder_n_points": 4,
65
+ "decoder_offset_scale": 0.5,
66
+ "dropout": 0.0,
67
+ "encode_proj_layers": [
68
+ 2
69
+ ],
70
+ "encoder_activation_function": "gelu",
71
+ "encoder_attention_heads": 8,
72
+ "encoder_ffn_dim": 2048,
73
+ "encoder_hidden_dim": 384,
74
+ "encoder_in_channels": [
75
+ 512,
76
+ 1024,
77
+ 2048
78
+ ],
79
+ "encoder_layers": 1,
80
+ "eos_coefficient": 0.0001,
81
+ "eval_size": null,
82
+ "feat_strides": [
83
+ 8,
84
+ 16,
85
+ 32
86
+ ],
87
+ "focal_loss_alpha": 0.75,
88
+ "focal_loss_gamma": 2.0,
89
+ "freeze_backbone_batch_norms": true,
90
+ "hidden_expansion": 1.0,
91
+ "id2label": {
92
+ "0": "Caption",
93
+ "1": "Footnote",
94
+ "2": "Formula",
95
+ "3": "List-item",
96
+ "4": "Page-footer",
97
+ "5": "Page-header",
98
+ "6": "Picture",
99
+ "7": "Section-header",
100
+ "8": "Table",
101
+ "9": "Text",
102
+ "10": "Title",
103
+ "11": "Document Index",
104
+ "12": "Code",
105
+ "13": "Checkbox-Selected",
106
+ "14": "Checkbox-Unselected",
107
+ "15": "Form",
108
+ "16": "Key-Value Region"
109
+ },
110
+ "initializer_bias_prior_prob": null,
111
+ "initializer_range": 0.01,
112
+ "is_encoder_decoder": true,
113
+ "label2id": {
114
+ "Caption": 0,
115
+ "Checkbox-Selected": 13,
116
+ "Checkbox-Unselected": 14,
117
+ "Code": 12,
118
+ "Document Index": 11,
119
+ "Footnote": 1,
120
+ "Form": 15,
121
+ "Formula": 2,
122
+ "Key-Value Region": 16,
123
+ "List-item": 3,
124
+ "Page-footer": 4,
125
+ "Page-header": 5,
126
+ "Picture": 6,
127
+ "Section-header": 7,
128
+ "Table": 8,
129
+ "Text": 9,
130
+ "Title": 10
131
+ },
132
+ "label_noise_ratio": 0.5,
133
+ "layer_norm_eps": 1e-05,
134
+ "learn_initial_query": false,
135
+ "matcher_alpha": 0.25,
136
+ "matcher_bbox_cost": 5.0,
137
+ "matcher_class_cost": 2.0,
138
+ "matcher_gamma": 2.0,
139
+ "matcher_giou_cost": 2.0,
140
+ "model_type": "rt_detr_v2",
141
+ "normalize_before": false,
142
+ "num_denoising": 100,
143
+ "num_feature_levels": 3,
144
+ "num_queries": 300,
145
+ "positional_encoding_temperature": 10000,
146
+ "torch_dtype": "float32",
147
+ "transformers_version": "4.53.0.dev0",
148
+ "use_focal_loss": true,
149
+ "use_pretrained_backbone": false,
150
+ "use_timm_backbone": false,
151
+ "weight_loss_bbox": 5.0,
152
+ "weight_loss_giou": 2.0,
153
+ "weight_loss_vfl": 1.0,
154
+ "with_box_refine": true
155
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c11fe1df2087f29c77359834ac34700a5d9a41516b3387a3df0d7c3d52440c4e
3
+ size 306814140
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_convert_annotations": true,
3
+ "do_normalize": false,
4
+ "do_pad": false,
5
+ "do_rescale": true,
6
+ "do_resize": true,
7
+ "format": "coco_detection",
8
+ "image_mean": [
9
+ 0.485,
10
+ 0.456,
11
+ 0.406
12
+ ],
13
+ "image_processor_type": "RTDetrImageProcessor",
14
+ "image_std": [
15
+ 0.229,
16
+ 0.224,
17
+ 0.225
18
+ ],
19
+ "pad_size": null,
20
+ "resample": 2,
21
+ "rescale_factor": 0.00392156862745098,
22
+ "size": {
23
+ "height": 640,
24
+ "width": 640
25
+ }
26
+ }