tk725 commited on
Commit
da12f44
·
verified ·
1 Parent(s): 712f83d

Upload 17 files

Browse files
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ flux1-dev-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
4x-ClearRealityV1.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4cd3a25b00e0be949d4302fc774eb4d7f2ed5f47cdb51551e2d75fa6562e51e
3
+ size 9016074
ComfyUI_00044_%20%281%29.png ADDED

Git LFS Details

  • SHA256: 069a89804e6a56c4c4ae850e965afcc1c0c357c6b1c9211e47570968a204542e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.77 MB
Flux%20Upscaler%20Ultimate%20SD.json ADDED
@@ -0,0 +1,1212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "id": "a264aa86-8e40-4a80-b539-3a82daf6a53a",
3
+ "revision": 0,
4
+ "last_node_id": 21,
5
+ "last_link_id": 26,
6
+ "nodes": [
7
+ {
8
+ "id": 5,
9
+ "type": "Florence2Run",
10
+ "pos": [
11
+ -94.774169921875,
12
+ -28
13
+ ],
14
+ "size": [
15
+ 400,
16
+ 364
17
+ ],
18
+ "flags": {},
19
+ "order": 10,
20
+ "mode": 0,
21
+ "inputs": [
22
+ {
23
+ "name": "image",
24
+ "type": "IMAGE",
25
+ "link": 16
26
+ },
27
+ {
28
+ "name": "florence2_model",
29
+ "type": "FL2MODEL",
30
+ "link": 4
31
+ }
32
+ ],
33
+ "outputs": [
34
+ {
35
+ "name": "image",
36
+ "type": "IMAGE",
37
+ "links": null
38
+ },
39
+ {
40
+ "name": "mask",
41
+ "type": "MASK",
42
+ "links": null
43
+ },
44
+ {
45
+ "name": "caption",
46
+ "type": "STRING",
47
+ "links": [
48
+ 5
49
+ ]
50
+ },
51
+ {
52
+ "name": "data",
53
+ "type": "JSON",
54
+ "links": null
55
+ }
56
+ ],
57
+ "properties": {
58
+ "cnr_id": "comfyui-florence2",
59
+ "ver": "1.0.5",
60
+ "Node name for S&R": "Florence2Run",
61
+ "widget_ue_connectable": {
62
+ "text_input": true,
63
+ "task": true,
64
+ "fill_mask": true,
65
+ "keep_model_loaded": true,
66
+ "max_new_tokens": true,
67
+ "num_beams": true,
68
+ "do_sample": true,
69
+ "output_mask_select": true,
70
+ "seed": true
71
+ }
72
+ },
73
+ "widgets_values": [
74
+ "",
75
+ "detailed_caption",
76
+ true,
77
+ false,
78
+ 1024,
79
+ 3,
80
+ true,
81
+ "",
82
+ 795475834295105,
83
+ "randomize"
84
+ ]
85
+ },
86
+ {
87
+ "id": 14,
88
+ "type": "SaveImageWebsocket",
89
+ "pos": [
90
+ 1275.864990234375,
91
+ 203.62318420410156
92
+ ],
93
+ "size": [
94
+ 179.71542358398438,
95
+ 26
96
+ ],
97
+ "flags": {},
98
+ "order": 18,
99
+ "mode": 0,
100
+ "inputs": [
101
+ {
102
+ "name": "images",
103
+ "type": "IMAGE",
104
+ "link": 15
105
+ }
106
+ ],
107
+ "outputs": [],
108
+ "properties": {
109
+ "Node name for S&R": "SaveImageWebsocket",
110
+ "widget_ue_connectable": {}
111
+ },
112
+ "widgets_values": []
113
+ },
114
+ {
115
+ "id": 1,
116
+ "type": "UltimateSDUpscale",
117
+ "pos": [
118
+ 746.2258911132812,
119
+ 260
120
+ ],
121
+ "size": [
122
+ 315,
123
+ 826
124
+ ],
125
+ "flags": {},
126
+ "order": 15,
127
+ "mode": 0,
128
+ "inputs": [
129
+ {
130
+ "name": "image",
131
+ "type": "IMAGE",
132
+ "link": 17
133
+ },
134
+ {
135
+ "name": "model",
136
+ "type": "MODEL",
137
+ "link": 2
138
+ },
139
+ {
140
+ "name": "positive",
141
+ "type": "CONDITIONING",
142
+ "link": 7
143
+ },
144
+ {
145
+ "name": "negative",
146
+ "type": "CONDITIONING",
147
+ "link": 8
148
+ },
149
+ {
150
+ "name": "vae",
151
+ "type": "VAE",
152
+ "link": 9
153
+ },
154
+ {
155
+ "name": "upscale_model",
156
+ "type": "UPSCALE_MODEL",
157
+ "link": 10
158
+ }
159
+ ],
160
+ "outputs": [
161
+ {
162
+ "name": "IMAGE",
163
+ "type": "IMAGE",
164
+ "slot_index": 0,
165
+ "links": [
166
+ 11,
167
+ 15
168
+ ]
169
+ }
170
+ ],
171
+ "properties": {
172
+ "cnr_id": "comfyui_ultimatesdupscale",
173
+ "ver": "778a475dde8116a2066fe07f6c9ca15554e0b5be",
174
+ "Node name for S&R": "UltimateSDUpscale",
175
+ "widget_ue_connectable": {}
176
+ },
177
+ "widgets_values": [
178
+ 2,
179
+ 384340151733828,
180
+ "fixed",
181
+ 22,
182
+ 1,
183
+ "deis",
184
+ "beta",
185
+ 0.2,
186
+ "Linear",
187
+ 1024,
188
+ 1024,
189
+ 8,
190
+ 32,
191
+ "None",
192
+ 1,
193
+ 64,
194
+ 8,
195
+ 16,
196
+ true,
197
+ false
198
+ ]
199
+ },
200
+ {
201
+ "id": 4,
202
+ "type": "Florence2ModelLoader",
203
+ "pos": [
204
+ -519.1130981445312,
205
+ 328.15826416015625
206
+ ],
207
+ "size": [
208
+ 288.740234375,
209
+ 130
210
+ ],
211
+ "flags": {},
212
+ "order": 0,
213
+ "mode": 0,
214
+ "inputs": [
215
+ {
216
+ "name": "lora",
217
+ "shape": 7,
218
+ "type": "PEFTLORA",
219
+ "link": null
220
+ }
221
+ ],
222
+ "outputs": [
223
+ {
224
+ "name": "florence2_model",
225
+ "type": "FL2MODEL",
226
+ "links": [
227
+ 4
228
+ ]
229
+ }
230
+ ],
231
+ "properties": {
232
+ "cnr_id": "comfyui-florence2",
233
+ "ver": "1.0.5",
234
+ "Node name for S&R": "Florence2ModelLoader",
235
+ "widget_ue_connectable": {
236
+ "model": true,
237
+ "precision": true,
238
+ "attention": true,
239
+ "convert_to_safetensors": true
240
+ }
241
+ },
242
+ "widgets_values": [
243
+ "Florence-2-base",
244
+ "fp16",
245
+ "sdpa",
246
+ false
247
+ ]
248
+ },
249
+ {
250
+ "id": 15,
251
+ "type": "PreviewImage",
252
+ "pos": [
253
+ 812.5357055664062,
254
+ 1252.701171875
255
+ ],
256
+ "size": [
257
+ 430.5307312011719,
258
+ 246.00003051757812
259
+ ],
260
+ "flags": {},
261
+ "order": 16,
262
+ "mode": 4,
263
+ "inputs": [
264
+ {
265
+ "name": "images",
266
+ "type": "IMAGE",
267
+ "link": 18
268
+ }
269
+ ],
270
+ "outputs": [],
271
+ "properties": {
272
+ "cnr_id": "comfy-core",
273
+ "ver": "0.3.43",
274
+ "Node name for S&R": "PreviewImage",
275
+ "widget_ue_connectable": {}
276
+ },
277
+ "widgets_values": []
278
+ },
279
+ {
280
+ "id": 16,
281
+ "type": "ToDetailerPipe",
282
+ "pos": [
283
+ -89.27690887451172,
284
+ 1572.05126953125
285
+ ],
286
+ "size": [
287
+ 400,
288
+ 296
289
+ ],
290
+ "flags": {
291
+ "collapsed": false
292
+ },
293
+ "order": 12,
294
+ "mode": 4,
295
+ "inputs": [
296
+ {
297
+ "name": "model",
298
+ "type": "MODEL",
299
+ "link": 22
300
+ },
301
+ {
302
+ "name": "clip",
303
+ "type": "CLIP",
304
+ "link": 25
305
+ },
306
+ {
307
+ "name": "vae",
308
+ "type": "VAE",
309
+ "link": 26
310
+ },
311
+ {
312
+ "name": "positive",
313
+ "type": "CONDITIONING",
314
+ "link": 23
315
+ },
316
+ {
317
+ "name": "negative",
318
+ "type": "CONDITIONING",
319
+ "link": 24
320
+ },
321
+ {
322
+ "name": "bbox_detector",
323
+ "type": "BBOX_DETECTOR",
324
+ "link": 19
325
+ },
326
+ {
327
+ "name": "sam_model_opt",
328
+ "shape": 7,
329
+ "type": "SAM_MODEL",
330
+ "link": null
331
+ },
332
+ {
333
+ "name": "segm_detector_opt",
334
+ "shape": 7,
335
+ "type": "SEGM_DETECTOR",
336
+ "link": null
337
+ },
338
+ {
339
+ "name": "detailer_hook",
340
+ "shape": 7,
341
+ "type": "DETAILER_HOOK",
342
+ "link": 20
343
+ }
344
+ ],
345
+ "outputs": [
346
+ {
347
+ "name": "detailer_pipe",
348
+ "type": "DETAILER_PIPE",
349
+ "links": [
350
+ 21
351
+ ]
352
+ }
353
+ ],
354
+ "properties": {
355
+ "cnr_id": "comfyui-impact-pack",
356
+ "ver": "8.16.0",
357
+ "Node name for S&R": "ToDetailerPipe",
358
+ "aux_id": "ltdrdata/ComfyUI-Impact-Pack",
359
+ "widget_ue_connectable": {}
360
+ },
361
+ "widgets_values": [
362
+ "",
363
+ "Select the LoRA to add to the text",
364
+ "Select the Wildcard to add to the text"
365
+ ]
366
+ },
367
+ {
368
+ "id": 18,
369
+ "type": "UltralyticsDetectorProvider",
370
+ "pos": [
371
+ -484.380859375,
372
+ 1682.8277587890625
373
+ ],
374
+ "size": [
375
+ 315,
376
+ 78
377
+ ],
378
+ "flags": {},
379
+ "order": 1,
380
+ "mode": 4,
381
+ "inputs": [],
382
+ "outputs": [
383
+ {
384
+ "name": "BBOX_DETECTOR",
385
+ "type": "BBOX_DETECTOR",
386
+ "slot_index": 0,
387
+ "links": [
388
+ 19
389
+ ]
390
+ },
391
+ {
392
+ "name": "SEGM_DETECTOR",
393
+ "type": "SEGM_DETECTOR",
394
+ "slot_index": 1,
395
+ "links": []
396
+ }
397
+ ],
398
+ "properties": {
399
+ "cnr_id": "comfyui-impact-subpack",
400
+ "ver": "1.3.2",
401
+ "Node name for S&R": "UltralyticsDetectorProvider",
402
+ "aux_id": "ltdrdata/ComfyUI-Impact-Subpack",
403
+ "widget_ue_connectable": {}
404
+ },
405
+ "widgets_values": [
406
+ "bbox/face_yolov8m.pt"
407
+ ]
408
+ },
409
+ {
410
+ "id": 19,
411
+ "type": "CoreMLDetailerHookProvider",
412
+ "pos": [
413
+ -518.67529296875,
414
+ 1842.27197265625
415
+ ],
416
+ "size": [
417
+ 327.5999755859375,
418
+ 58
419
+ ],
420
+ "flags": {
421
+ "collapsed": false
422
+ },
423
+ "order": 2,
424
+ "mode": 4,
425
+ "inputs": [],
426
+ "outputs": [
427
+ {
428
+ "name": "DETAILER_HOOK",
429
+ "type": "DETAILER_HOOK",
430
+ "slot_index": 0,
431
+ "links": [
432
+ 20
433
+ ]
434
+ }
435
+ ],
436
+ "properties": {
437
+ "cnr_id": "comfyui-impact-pack",
438
+ "ver": "8.16.0",
439
+ "Node name for S&R": "CoreMLDetailerHookProvider",
440
+ "aux_id": "ltdrdata/ComfyUI-Impact-Pack",
441
+ "widget_ue_connectable": {}
442
+ },
443
+ "widgets_values": [
444
+ "512x768"
445
+ ]
446
+ },
447
+ {
448
+ "id": 17,
449
+ "type": "FaceDetailerPipe",
450
+ "pos": [
451
+ 397.5056457519531,
452
+ 1271.07275390625
453
+ ],
454
+ "size": [
455
+ 346,
456
+ 994
457
+ ],
458
+ "flags": {},
459
+ "order": 14,
460
+ "mode": 4,
461
+ "inputs": [
462
+ {
463
+ "name": "image",
464
+ "type": "IMAGE",
465
+ "link": null
466
+ },
467
+ {
468
+ "name": "detailer_pipe",
469
+ "type": "DETAILER_PIPE",
470
+ "link": 21
471
+ },
472
+ {
473
+ "name": "scheduler_func_opt",
474
+ "shape": 7,
475
+ "type": "SCHEDULER_FUNC",
476
+ "link": null
477
+ }
478
+ ],
479
+ "outputs": [
480
+ {
481
+ "name": "image",
482
+ "type": "IMAGE",
483
+ "slot_index": 0,
484
+ "links": [
485
+ 18
486
+ ]
487
+ },
488
+ {
489
+ "name": "cropped_refined",
490
+ "shape": 6,
491
+ "type": "IMAGE",
492
+ "slot_index": 1,
493
+ "links": []
494
+ },
495
+ {
496
+ "name": "cropped_enhanced_alpha",
497
+ "shape": 6,
498
+ "type": "IMAGE",
499
+ "links": null
500
+ },
501
+ {
502
+ "name": "mask",
503
+ "type": "MASK",
504
+ "links": null
505
+ },
506
+ {
507
+ "name": "detailer_pipe",
508
+ "type": "DETAILER_PIPE",
509
+ "links": null
510
+ },
511
+ {
512
+ "name": "cnet_images",
513
+ "shape": 6,
514
+ "type": "IMAGE",
515
+ "links": null
516
+ }
517
+ ],
518
+ "properties": {
519
+ "cnr_id": "comfyui-impact-pack",
520
+ "ver": "8.16.0",
521
+ "Node name for S&R": "FaceDetailerPipe",
522
+ "aux_id": "ltdrdata/ComfyUI-Impact-Pack",
523
+ "widget_ue_connectable": {}
524
+ },
525
+ "widgets_values": [
526
+ 512,
527
+ true,
528
+ 1024,
529
+ 12346,
530
+ "fixed",
531
+ 20,
532
+ 1,
533
+ "deis",
534
+ "beta",
535
+ 0.22,
536
+ 5,
537
+ true,
538
+ true,
539
+ 0.5,
540
+ 20,
541
+ 3,
542
+ "center-1",
543
+ 0,
544
+ 0.93,
545
+ 0,
546
+ 0.7,
547
+ "False",
548
+ 10,
549
+ 0.2,
550
+ 1,
551
+ false,
552
+ 20,
553
+ false,
554
+ false
555
+ ]
556
+ },
557
+ {
558
+ "id": 11,
559
+ "type": "PreviewImage",
560
+ "pos": [
561
+ 1340.685546875,
562
+ 480.72686767578125
563
+ ],
564
+ "size": [
565
+ 140,
566
+ 246
567
+ ],
568
+ "flags": {},
569
+ "order": 17,
570
+ "mode": 0,
571
+ "inputs": [
572
+ {
573
+ "name": "images",
574
+ "type": "IMAGE",
575
+ "link": 11
576
+ }
577
+ ],
578
+ "outputs": [],
579
+ "properties": {
580
+ "cnr_id": "comfy-core",
581
+ "ver": "0.3.43",
582
+ "Node name for S&R": "PreviewImage",
583
+ "widget_ue_connectable": {}
584
+ },
585
+ "widgets_values": []
586
+ },
587
+ {
588
+ "id": 2,
589
+ "type": "LoadImage",
590
+ "pos": [
591
+ -2779.779541015625,
592
+ 580.9016723632812
593
+ ],
594
+ "size": [
595
+ 274.080078125,
596
+ 314.0000305175781
597
+ ],
598
+ "flags": {},
599
+ "order": 3,
600
+ "mode": 0,
601
+ "inputs": [],
602
+ "outputs": [
603
+ {
604
+ "name": "IMAGE",
605
+ "type": "IMAGE",
606
+ "links": [
607
+ 16,
608
+ 17
609
+ ]
610
+ },
611
+ {
612
+ "name": "MASK",
613
+ "type": "MASK",
614
+ "links": null
615
+ }
616
+ ],
617
+ "properties": {
618
+ "cnr_id": "comfy-core",
619
+ "ver": "0.3.43",
620
+ "Node name for S&R": "LoadImage",
621
+ "widget_ue_connectable": {
622
+ "image": true,
623
+ "upload": true
624
+ }
625
+ },
626
+ "widgets_values": [
627
+ "ComfyUI_00286_.png",
628
+ "image"
629
+ ]
630
+ },
631
+ {
632
+ "id": 7,
633
+ "type": "DualCLIPLoader",
634
+ "pos": [
635
+ -2102.945556640625,
636
+ 192.6034698486328
637
+ ],
638
+ "size": [
639
+ 369.8720397949219,
640
+ 138.56109619140625
641
+ ],
642
+ "flags": {},
643
+ "order": 4,
644
+ "mode": 0,
645
+ "showAdvanced": true,
646
+ "inputs": [],
647
+ "outputs": [
648
+ {
649
+ "name": "CLIP",
650
+ "type": "CLIP",
651
+ "slot_index": 0,
652
+ "links": [
653
+ 6,
654
+ 12,
655
+ 25
656
+ ]
657
+ }
658
+ ],
659
+ "properties": {
660
+ "cnr_id": "comfy-core",
661
+ "ver": "0.3.18",
662
+ "Node name for S&R": "DualCLIPLoader",
663
+ "widget_ue_connectable": {}
664
+ },
665
+ "widgets_values": [
666
+ "clip_l.safetensors",
667
+ "t5xxl_fp16.safetensors",
668
+ "flux",
669
+ "cpu"
670
+ ]
671
+ },
672
+ {
673
+ "id": 6,
674
+ "type": "CLIPTextEncode",
675
+ "pos": [
676
+ -1378.50048828125,
677
+ 125.485107421875
678
+ ],
679
+ "size": [
680
+ 400,
681
+ 200
682
+ ],
683
+ "flags": {},
684
+ "order": 13,
685
+ "mode": 0,
686
+ "inputs": [
687
+ {
688
+ "name": "clip",
689
+ "type": "CLIP",
690
+ "link": 6
691
+ },
692
+ {
693
+ "name": "text",
694
+ "type": "STRING",
695
+ "widget": {
696
+ "name": "text"
697
+ },
698
+ "link": 5
699
+ }
700
+ ],
701
+ "outputs": [
702
+ {
703
+ "name": "CONDITIONING",
704
+ "type": "CONDITIONING",
705
+ "links": [
706
+ 7
707
+ ]
708
+ }
709
+ ],
710
+ "properties": {
711
+ "cnr_id": "comfy-core",
712
+ "ver": "0.3.43",
713
+ "Node name for S&R": "CLIPTextEncode",
714
+ "widget_ue_connectable": {
715
+ "text": true
716
+ }
717
+ },
718
+ "widgets_values": [
719
+ ""
720
+ ]
721
+ },
722
+ {
723
+ "id": 3,
724
+ "type": "UnetLoaderGGUF",
725
+ "pos": [
726
+ -1953.1029052734375,
727
+ 483.87322998046875
728
+ ],
729
+ "size": [
730
+ 270,
731
+ 58
732
+ ],
733
+ "flags": {},
734
+ "order": 5,
735
+ "mode": 0,
736
+ "inputs": [],
737
+ "outputs": [
738
+ {
739
+ "name": "MODEL",
740
+ "type": "MODEL",
741
+ "links": [
742
+ 2,
743
+ 22
744
+ ]
745
+ }
746
+ ],
747
+ "properties": {
748
+ "cnr_id": "ComfyUI-GGUF",
749
+ "ver": "a2b75978fd50c0227a58316619b79d525b88e570",
750
+ "Node name for S&R": "UnetLoaderGGUF",
751
+ "widget_ue_connectable": {
752
+ "unet_name": true
753
+ }
754
+ },
755
+ "widgets_values": [
756
+ "flux1-dev-Q8_0.gguf"
757
+ ]
758
+ },
759
+ {
760
+ "id": 8,
761
+ "type": "CLIPTextEncode",
762
+ "pos": [
763
+ -1502.6611328125,
764
+ 471.0852966308594
765
+ ],
766
+ "size": [
767
+ 400,
768
+ 200
769
+ ],
770
+ "flags": {},
771
+ "order": 11,
772
+ "mode": 0,
773
+ "inputs": [
774
+ {
775
+ "name": "clip",
776
+ "type": "CLIP",
777
+ "link": 12
778
+ }
779
+ ],
780
+ "outputs": [
781
+ {
782
+ "name": "CONDITIONING",
783
+ "type": "CONDITIONING",
784
+ "links": [
785
+ 8
786
+ ]
787
+ }
788
+ ],
789
+ "properties": {
790
+ "cnr_id": "comfy-core",
791
+ "ver": "0.3.43",
792
+ "Node name for S&R": "CLIPTextEncode",
793
+ "widget_ue_connectable": {
794
+ "text": true
795
+ }
796
+ },
797
+ "widgets_values": [
798
+ ""
799
+ ]
800
+ },
801
+ {
802
+ "id": 10,
803
+ "type": "UpscaleModelLoader",
804
+ "pos": [
805
+ -51.436893463134766,
806
+ 473.0013732910156
807
+ ],
808
+ "size": [
809
+ 315,
810
+ 58
811
+ ],
812
+ "flags": {},
813
+ "order": 6,
814
+ "mode": 0,
815
+ "inputs": [],
816
+ "outputs": [
817
+ {
818
+ "name": "UPSCALE_MODEL",
819
+ "type": "UPSCALE_MODEL",
820
+ "slot_index": 0,
821
+ "links": [
822
+ 10
823
+ ]
824
+ }
825
+ ],
826
+ "properties": {
827
+ "cnr_id": "comfy-core",
828
+ "ver": "0.3.39",
829
+ "Node name for S&R": "UpscaleModelLoader",
830
+ "widget_ue_connectable": {}
831
+ },
832
+ "widgets_values": [
833
+ "4x-ClearRealityV1.pth"
834
+ ]
835
+ },
836
+ {
837
+ "id": 9,
838
+ "type": "VAELoader",
839
+ "pos": [
840
+ -1591.7237548828125,
841
+ 800.7269897460938
842
+ ],
843
+ "size": [
844
+ 270,
845
+ 58
846
+ ],
847
+ "flags": {},
848
+ "order": 7,
849
+ "mode": 0,
850
+ "inputs": [],
851
+ "outputs": [
852
+ {
853
+ "name": "VAE",
854
+ "type": "VAE",
855
+ "links": [
856
+ 9,
857
+ 26
858
+ ]
859
+ }
860
+ ],
861
+ "properties": {
862
+ "cnr_id": "comfy-core",
863
+ "ver": "0.3.43",
864
+ "Node name for S&R": "VAELoader",
865
+ "widget_ue_connectable": {
866
+ "vae_name": true
867
+ }
868
+ },
869
+ "widgets_values": [
870
+ "ae.safetensors"
871
+ ]
872
+ },
873
+ {
874
+ "id": 20,
875
+ "type": "CLIPTextEncode",
876
+ "pos": [
877
+ -499.7518615722656,
878
+ 1280.487060546875
879
+ ],
880
+ "size": [
881
+ 296.4438781738281,
882
+ 90.35235595703125
883
+ ],
884
+ "flags": {},
885
+ "order": 8,
886
+ "mode": 4,
887
+ "inputs": [
888
+ {
889
+ "name": "clip",
890
+ "type": "CLIP",
891
+ "link": null
892
+ }
893
+ ],
894
+ "outputs": [
895
+ {
896
+ "name": "CONDITIONING",
897
+ "type": "CONDITIONING",
898
+ "links": [
899
+ 23
900
+ ]
901
+ }
902
+ ],
903
+ "properties": {
904
+ "cnr_id": "comfy-core",
905
+ "ver": "0.3.43",
906
+ "Node name for S&R": "CLIPTextEncode",
907
+ "widget_ue_connectable": {
908
+ "text": true
909
+ }
910
+ },
911
+ "widgets_values": [
912
+ "4k hd resolution"
913
+ ]
914
+ },
915
+ {
916
+ "id": 21,
917
+ "type": "CLIPTextEncode",
918
+ "pos": [
919
+ -483.9172058105469,
920
+ 1423.504638671875
921
+ ],
922
+ "size": [
923
+ 269.4439392089844,
924
+ 88
925
+ ],
926
+ "flags": {
927
+ "collapsed": false
928
+ },
929
+ "order": 9,
930
+ "mode": 4,
931
+ "inputs": [
932
+ {
933
+ "name": "clip",
934
+ "type": "CLIP",
935
+ "link": null
936
+ }
937
+ ],
938
+ "outputs": [
939
+ {
940
+ "name": "CONDITIONING",
941
+ "type": "CONDITIONING",
942
+ "slot_index": 0,
943
+ "links": [
944
+ 24
945
+ ]
946
+ }
947
+ ],
948
+ "title": "CLIP Text Encode (Negative Prompt)",
949
+ "properties": {
950
+ "cnr_id": "comfy-core",
951
+ "ver": "0.3.43",
952
+ "Node name for S&R": "CLIPTextEncode",
953
+ "widget_ue_connectable": {}
954
+ },
955
+ "widgets_values": [
956
+ ""
957
+ ],
958
+ "color": "#322",
959
+ "bgcolor": "#533"
960
+ }
961
+ ],
962
+ "links": [
963
+ [
964
+ 2,
965
+ 3,
966
+ 0,
967
+ 1,
968
+ 1,
969
+ "MODEL"
970
+ ],
971
+ [
972
+ 4,
973
+ 4,
974
+ 0,
975
+ 5,
976
+ 1,
977
+ "FL2MODEL"
978
+ ],
979
+ [
980
+ 5,
981
+ 5,
982
+ 2,
983
+ 6,
984
+ 1,
985
+ "STRING"
986
+ ],
987
+ [
988
+ 6,
989
+ 7,
990
+ 0,
991
+ 6,
992
+ 0,
993
+ "CLIP"
994
+ ],
995
+ [
996
+ 7,
997
+ 6,
998
+ 0,
999
+ 1,
1000
+ 2,
1001
+ "CONDITIONING"
1002
+ ],
1003
+ [
1004
+ 8,
1005
+ 8,
1006
+ 0,
1007
+ 1,
1008
+ 3,
1009
+ "CONDITIONING"
1010
+ ],
1011
+ [
1012
+ 9,
1013
+ 9,
1014
+ 0,
1015
+ 1,
1016
+ 4,
1017
+ "VAE"
1018
+ ],
1019
+ [
1020
+ 10,
1021
+ 10,
1022
+ 0,
1023
+ 1,
1024
+ 5,
1025
+ "UPSCALE_MODEL"
1026
+ ],
1027
+ [
1028
+ 11,
1029
+ 1,
1030
+ 0,
1031
+ 11,
1032
+ 0,
1033
+ "IMAGE"
1034
+ ],
1035
+ [
1036
+ 12,
1037
+ 7,
1038
+ 0,
1039
+ 8,
1040
+ 0,
1041
+ "CLIP"
1042
+ ],
1043
+ [
1044
+ 15,
1045
+ 1,
1046
+ 0,
1047
+ 14,
1048
+ 0,
1049
+ "IMAGE"
1050
+ ],
1051
+ [
1052
+ 16,
1053
+ 2,
1054
+ 0,
1055
+ 5,
1056
+ 0,
1057
+ "IMAGE"
1058
+ ],
1059
+ [
1060
+ 17,
1061
+ 2,
1062
+ 0,
1063
+ 1,
1064
+ 0,
1065
+ "IMAGE"
1066
+ ],
1067
+ [
1068
+ 18,
1069
+ 17,
1070
+ 0,
1071
+ 15,
1072
+ 0,
1073
+ "IMAGE"
1074
+ ],
1075
+ [
1076
+ 19,
1077
+ 18,
1078
+ 0,
1079
+ 16,
1080
+ 5,
1081
+ "BBOX_DETECTOR"
1082
+ ],
1083
+ [
1084
+ 20,
1085
+ 19,
1086
+ 0,
1087
+ 16,
1088
+ 8,
1089
+ "DETAILER_HOOK"
1090
+ ],
1091
+ [
1092
+ 21,
1093
+ 16,
1094
+ 0,
1095
+ 17,
1096
+ 1,
1097
+ "DETAILER_PIPE"
1098
+ ],
1099
+ [
1100
+ 22,
1101
+ 3,
1102
+ 0,
1103
+ 16,
1104
+ 0,
1105
+ "MODEL"
1106
+ ],
1107
+ [
1108
+ 23,
1109
+ 20,
1110
+ 0,
1111
+ 16,
1112
+ 3,
1113
+ "CONDITIONING"
1114
+ ],
1115
+ [
1116
+ 24,
1117
+ 21,
1118
+ 0,
1119
+ 16,
1120
+ 4,
1121
+ "CONDITIONING"
1122
+ ],
1123
+ [
1124
+ 25,
1125
+ 7,
1126
+ 0,
1127
+ 16,
1128
+ 1,
1129
+ "CLIP"
1130
+ ],
1131
+ [
1132
+ 26,
1133
+ 9,
1134
+ 0,
1135
+ 16,
1136
+ 2,
1137
+ "VAE"
1138
+ ]
1139
+ ],
1140
+ "groups": [
1141
+ {
1142
+ "id": 1,
1143
+ "title": "Face Detailer",
1144
+ "bounding": [
1145
+ -528.67529296875,
1146
+ 1179.1011962890625,
1147
+ 1781.74169921875,
1148
+ 1095.9715576171875
1149
+ ],
1150
+ "color": "#3f789e",
1151
+ "font_size": 24,
1152
+ "flags": {}
1153
+ },
1154
+ {
1155
+ "id": 2,
1156
+ "title": "Upscaler",
1157
+ "bounding": [
1158
+ -529.1130981445312,
1159
+ -362.6000061035156,
1160
+ 2019.798095703125,
1161
+ 1458.5999755859375
1162
+ ],
1163
+ "color": "#3f789e",
1164
+ "font_size": 24,
1165
+ "flags": {}
1166
+ },
1167
+ {
1168
+ "id": 3,
1169
+ "title": "Load Image",
1170
+ "bounding": [
1171
+ -2789.78076171875,
1172
+ 514.9659423828125,
1173
+ 294.080078125,
1174
+ 397.6000671386719
1175
+ ],
1176
+ "color": "#3f789e",
1177
+ "font_size": 24,
1178
+ "flags": {}
1179
+ },
1180
+ {
1181
+ "id": 4,
1182
+ "title": "Models",
1183
+ "bounding": [
1184
+ -2112.945556640625,
1185
+ 51.885108947753906,
1186
+ 1144.445068359375,
1187
+ 816.8418579101562
1188
+ ],
1189
+ "color": "#3f789e",
1190
+ "font_size": 24,
1191
+ "flags": {}
1192
+ }
1193
+ ],
1194
+ "config": {},
1195
+ "extra": {
1196
+ "ue_links": [],
1197
+ "ds": {
1198
+ "scale": 0.5209868481924372,
1199
+ "offset": [
1200
+ 2168.5665548981806,
1201
+ 396.1683530457028
1202
+ ]
1203
+ },
1204
+ "links_added_by_ue": [],
1205
+ "frontendVersion": "1.23.4",
1206
+ "VHS_latentpreview": false,
1207
+ "VHS_latentpreviewrate": 0,
1208
+ "VHS_MetadataImage": true,
1209
+ "VHS_KeepIntermediate": true
1210
+ },
1211
+ "version": 0.4
1212
+ }
ae.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38
3
+ size 335304388
clip_l.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:820afa79f4ba17399e740b4b5630ab74bc3a196e471d7321123efa8356b62f79
3
+ size 1710537716
config.json ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "florence2",
3
+ "architectures": [
4
+ "Florence2ForConditionalGeneration"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_florence2.Florence2Config",
8
+ "AutoModelForCausalLM": "modeling_florence2.Florence2ForConditionalGeneration"
9
+ },
10
+ "bos_token_id": 0,
11
+ "eos_token_id": 2,
12
+ "ignore_index": -100,
13
+ "model_type": "florence2",
14
+ "pad_token_id": 1,
15
+ "projection_dim": 768,
16
+ "text_config": {
17
+ "vocab_size": 51289,
18
+ "activation_dropout": 0.1,
19
+ "activation_function": "gelu",
20
+ "add_bias_logits": false,
21
+ "add_final_layer_norm": false,
22
+ "attention_dropout": 0.1,
23
+ "bos_token_id": 0,
24
+ "classif_dropout": 0.1,
25
+ "classifier_dropout": 0.0,
26
+ "d_model": 768,
27
+ "decoder_attention_heads": 12,
28
+ "decoder_ffn_dim": 3072,
29
+ "decoder_layerdrop": 0.0,
30
+ "decoder_layers": 6,
31
+ "decoder_start_token_id": 2,
32
+ "dropout": 0.1,
33
+ "early_stopping": true,
34
+ "encoder_attention_heads": 12,
35
+ "encoder_ffn_dim": 3072,
36
+ "encoder_layerdrop": 0.0,
37
+ "encoder_layers": 6,
38
+ "eos_token_id": 2,
39
+ "forced_eos_token_id": 2,
40
+ "forced_bos_token_id": 0,
41
+ "gradient_checkpointing": false,
42
+ "init_std": 0.02,
43
+ "is_encoder_decoder": true,
44
+ "label2id": {
45
+ "LABEL_0": 0,
46
+ "LABEL_1": 1,
47
+ "LABEL_2": 2
48
+ },
49
+ "max_position_embeddings": 1024,
50
+ "no_repeat_ngram_size": 3,
51
+ "normalize_before": false,
52
+ "num_hidden_layers": 6,
53
+ "pad_token_id": 1,
54
+ "scale_embedding": false,
55
+ "num_beams": 3
56
+ },
57
+ "vision_config": {
58
+ "model_type": "davit",
59
+ "drop_path_rate": 0.1,
60
+ "patch_size": [7, 3, 3, 3],
61
+ "patch_stride": [4, 2, 2, 2],
62
+ "patch_padding": [3, 1, 1, 1],
63
+ "patch_prenorm": [false, true, true, true],
64
+ "enable_checkpoint": false,
65
+ "dim_embed": [128, 256, 512, 1024],
66
+ "num_heads": [4, 8, 16, 32],
67
+ "num_groups": [4, 8, 16, 32],
68
+ "depths": [1, 1, 9, 1],
69
+ "window_size": 12,
70
+ "projection_dim": 768,
71
+ "visual_temporal_embedding": {
72
+ "type": "COSINE",
73
+ "max_temporal_embeddings": 100
74
+ },
75
+ "image_pos_embed": {
76
+ "type": "learned_abs_2d",
77
+ "max_pos_embeddings": 50
78
+ },
79
+ "image_feature_source": ["spatial_avg_pool", "temporal_avg_pool"]
80
+ },
81
+ "vocab_size": 51289,
82
+ "torch_dtype": "float16",
83
+ "transformers_version": "4.41.0.dev0",
84
+ "is_encoder_decoder": true
85
+ }
configuration_florence2.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import warnings
15
+ """ Florence-2 configuration"""
16
+
17
+ from typing import Optional
18
+
19
+ from transformers import AutoConfig
20
+ from transformers.configuration_utils import PretrainedConfig
21
+ from transformers.utils import logging
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ class Florence2VisionConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel
28
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
29
+ defaults will yield a similar configuration to that of the Florence2VisionModel architecture.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+ Args:
35
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
36
+ The dropout rate of the drop path layer.
37
+ patch_size (`List[int]`, *optional*, defaults to [7, 3, 3, 3]):
38
+ The patch size of the image.
39
+ patch_stride (`List[int]`, *optional*, defaults to [4, 2, 2, 2]):
40
+ The patch stride of the image.
41
+ patch_padding (`List[int]`, *optional*, defaults to [3, 1, 1, 1]):
42
+ The patch padding of the image.
43
+ patch_prenorm (`List[bool]`, *optional*, defaults to [false, true, true, true]):
44
+ Whether to apply layer normalization before the patch embedding layer.
45
+ enable_checkpoint (`bool`, *optional*, defaults to False):
46
+ Whether to enable checkpointing.
47
+ dim_embed (`List[int]`, *optional*, defaults to [256, 512, 1024, 2048]):
48
+ The dimension of the embedding layer.
49
+ num_heads (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
50
+ The number of attention heads.
51
+ num_groups (`List[int]`, *optional*, defaults to [8, 16, 32, 64]):
52
+ The number of groups.
53
+ depths (`List[int]`, *optional*, defaults to [1, 1, 9, 1]):
54
+ The depth of the model.
55
+ window_size (`int`, *optional*, defaults to 12):
56
+ The window size of the model.
57
+ projection_dim (`int`, *optional*, defaults to 1024):
58
+ The dimension of the projection layer.
59
+ visual_temporal_embedding (`dict`, *optional*):
60
+ The configuration of the visual temporal embedding.
61
+ image_pos_embed (`dict`, *optional*):
62
+ The configuration of the image position embedding.
63
+ image_feature_source (`List[str]`, *optional*, defaults to ["spatial_avg_pool", "temporal_avg_pool"]):
64
+ The source of the image feature.
65
+ Example:
66
+
67
+ ```python
68
+ >>> from transformers import Florence2VisionConfig, Florence2VisionModel
69
+
70
+ >>> # Initializing a Florence2 Vision style configuration
71
+ >>> configuration = Florence2VisionConfig()
72
+
73
+ >>> # Initializing a model (with random weights)
74
+ >>> model = Florence2VisionModel(configuration)
75
+
76
+ >>> # Accessing the model configuration
77
+ >>> configuration = model.config
78
+ ```"""
79
+
80
+ model_type = "davit"
81
+ keys_to_ignore_at_inference = ["past_key_values"]
82
+
83
+ def __init__(
84
+ self,
85
+ drop_path_rate=0.1,
86
+ patch_size=[7, 3, 3, 3],
87
+ patch_stride=[4, 2, 2, 2],
88
+ patch_padding=[3, 1, 1, 1],
89
+ patch_prenorm=[False, True, True, True],
90
+ enable_checkpoint=False,
91
+ dim_embed=[256, 512, 1024, 2048],
92
+ num_heads=[8, 16, 32, 64],
93
+ num_groups=[8, 16, 32, 64],
94
+ depths=[1, 1, 9, 1],
95
+ window_size=12,
96
+ projection_dim=1024,
97
+ visual_temporal_embedding=None,
98
+ image_pos_embed=None,
99
+ image_feature_source=["spatial_avg_pool", "temporal_avg_pool"],
100
+ **kwargs,
101
+ ):
102
+ self.drop_path_rate = drop_path_rate
103
+ self.patch_size = patch_size
104
+ self.patch_stride = patch_stride
105
+ self.patch_padding = patch_padding
106
+ self.patch_prenorm = patch_prenorm
107
+ self.enable_checkpoint = enable_checkpoint
108
+ self.dim_embed = dim_embed
109
+ self.num_heads = num_heads
110
+ self.num_groups = num_groups
111
+ self.depths = depths
112
+ self.window_size = window_size
113
+ self.projection_dim = projection_dim
114
+ self.visual_temporal_embedding = visual_temporal_embedding
115
+ self.image_pos_embed = image_pos_embed
116
+ self.image_feature_source = image_feature_source
117
+
118
+ super().__init__(**kwargs)
119
+
120
+
121
+
122
class Florence2LanguageConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used to instantiate a BART
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BART
    [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 51289):
            Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Florence2LanguageModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        num_labels (`int`, *optional*, defaults to 3):
            The number of labels to use in [`Florence2LanguageForSequenceClassification`].
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel

    >>> # Initializing a Florence2 Language style configuration
    >>> configuration = Florence2LanguageConfig()

    >>> # Initializing a model (with random weights)
    >>> model = Florence2LanguageModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2_language"
    # Cached decoding state should not be treated as a model output at inference.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto the BART-style ones.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51289,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        # Store a config attribute per constructor argument (PretrainedConfig
        # serializes the instance __dict__, so each assignment becomes a field
        # in the saved config JSON).
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        # Expose the encoder depth under the generic name used by shared
        # transformers utilities.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        # Token/generation-related settings are handled by the base class.
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
271
+
272
class Florence2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate an
    Florence-2 model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Florence2VisionConfig`, *optional*):
            Custom vision config or dict
        text_config (`Union[AutoConfig, dict]`, *optional*):
            The config object of the text backbone.
        ignore_index (`int`, *optional*, defaults to -100):
            The ignore index for the loss function.
        vocab_size (`int`, *optional*, defaults to 51289):
            Vocabulary size of the Florence2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`~Florence2ForConditionalGeneration`]
        projection_dim (`int`, *optional*, defaults to 1024):
            Dimension of the multimodal projection space.

    Example:

    ```python
    >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig

    >>> # Initializing a clip-like vision config
    >>> vision_config = CLIPVisionConfig()

    >>> # Initializing a Bart config
    >>> text_config = BartConfig()

    >>> # Initializing a Florence-2 configuration
    >>> configuration = Florence2Config(vision_config, text_config)

    >>> # Initializing a model from the florence-2 configuration
    >>> model = Florence2ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "florence2"
    is_composition = False

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        ignore_index=-100,
        vocab_size=51289,
        projection_dim=1024,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        self.vocab_size = vocab_size
        self.projection_dim = projection_dim

        # Each sub-config may arrive as a plain dict (e.g. deserialized from
        # config.json) or as an already-constructed config object (as in the
        # docstring example).  The previous code unpacked unconditionally with
        # `**vision_config`, which raised a TypeError for config objects; a
        # redundant `self.vocab_size = self.vocab_size` no-op was also removed.
        if isinstance(vision_config, dict):
            vision_config = Florence2VisionConfig(**vision_config)
        self.vision_config = vision_config

        if isinstance(text_config, dict):
            text_config = Florence2LanguageConfig(**text_config)
        self.text_config = text_config

        super().__init__(**kwargs)
340
+
face_yolov8m.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f59b3d833e2ff32e194b5bb8e08d211dc7c5bdf144b90d2c8412c47ccfc83b36
3
+ size 6549796
flux1-dev-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:129032f32224bf7138f16e18673d8008ba5f84c1ec74063bf4511a8bb4cf553d
3
+ size 12708281504
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03075d2d2d2bbd3e180b9ba0afae4aa8563226e2d32911656966e05b2f2ee060
3
+ size 463221266
modeling_florence2.py ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_florence2.Florence2Processor"
4
+ },
5
+ "_valid_processor_keys": [
6
+ "images",
7
+ "do_resize",
8
+ "size",
9
+ "resample",
10
+ "do_rescale",
11
+ "rescale_factor",
12
+ "do_normalize",
13
+ "image_mean",
14
+ "image_std",
15
+ "return_tensors",
16
+ "data_format",
17
+ "input_data_format",
18
+ "do_convert_rgb"
19
+ ],
20
+ "do_convert_rgb": null,
21
+ "do_normalize": true,
22
+ "do_rescale": true,
23
+ "do_resize": true,
24
+ "do_center_crop": false,
25
+ "image_processor_type": "CLIPImageProcessor",
26
+ "image_seq_length": 577,
27
+ "image_mean": [0.485, 0.456, 0.406],
28
+ "image_std": [0.229, 0.224, 0.225],
29
+ "processor_class": "Florence2Processor",
30
+ "resample": 3,
31
+ "size": {
32
+ "height": 768,
33
+ "width":768
34
+ },
35
+ "crop_size": {
36
+ "height": 768,
37
+ "width": 768
38
+ }
39
+ }
processing_florence2.py ADDED
@@ -0,0 +1,1148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Florence-2.
17
+ """
18
+
19
+ import re
20
+ import logging
21
+ from typing import List, Optional, Union
22
+ import numpy as np
23
+ import math
24
+
25
+ import torch
26
+
27
+ from transformers.feature_extraction_utils import BatchFeature
28
+ from transformers.image_utils import ImageInput, is_valid_image
29
+ from transformers.processing_utils import ProcessorMixin
30
+ from transformers.tokenization_utils_base import (
31
+ PaddingStrategy,
32
+ PreTokenizedInput,
33
+ TextInput,
34
+ TruncationStrategy,
35
+ )
36
+ from transformers import BartTokenizer, BartTokenizerFast
37
+ from transformers.utils import TensorType
38
+
39
+
40
+ logger = logging.getLogger(__name__)
41
+
42
+ # Copied from transformers.models.idefics2.processing_idefics2.is_url
43
def is_url(val) -> bool:
    """Return ``True`` when *val* is a string beginning with ``"http"``."""
    if not isinstance(val, str):
        return False
    return val.startswith("http")
45
+
46
+ # Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url
47
def is_image_or_image_url(elem):
    """Return a truthy value when *elem* is a URL string or a valid image object."""
    if is_url(elem):
        return True
    return is_valid_image(elem)
49
+
50
+
51
def _is_str_or_image(elem):
    """Return a truthy value for plain strings and for images / image URLs."""
    if isinstance(elem, str):
        return True
    return is_image_or_image_url(elem)
53
+
54
+
55
class Florence2Processor(ProcessorMixin):
    r"""
    Constructs a Florence2 processor which wraps a Florence2 image processor and a Florence2 tokenizer into a single processor.

    [`Florence2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BartTokenizerFast`]. See the
    [`~Florence2Processor.__call__`] and [`~Florence2Processor.decode`] for more information.

    Args:
        image_processor ([`CLIPImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`BartTokenizerFast`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("BartTokenizer", "BartTokenizerFast")

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
    ):
        """Validate the two sub-components, register the Florence-2 special
        tokens on the tokenizer, and build the task-prompt lookup tables."""
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        if not hasattr(image_processor, "image_seq_length"):
            raise ValueError("Image processor is missing an `image_seq_length` attribute.")

        # Number of image tokens that precede the text tokens in the model input.
        self.image_seq_length = image_processor.image_seq_length

        # Task/region markers plus 1000 location tokens ('<loc_0>'..'<loc_999>')
        # used to encode quantized coordinates as vocabulary entries.
        # NOTE(review): '<region_to_desciption>' is misspelled but must stay
        # byte-identical — it matches the vocabulary of released checkpoints.
        tokens_to_add = {
            'additional_special_tokens': \
                tokenizer.additional_special_tokens + \
                ['<od>', '</od>', '<ocr>', '</ocr>'] + \
                [f'<loc_{x}>' for x in range(1000)] + \
                ['<cap>', '</cap>', '<ncap>', '</ncap>','<dcap>', '</dcap>', '<grounding>', '</grounding>', '<seg>', '</seg>', '<sep>', '<region_cap>', '</region_cap>', '<region_to_desciption>', '</region_to_desciption>', '<proposal>', '</proposal>', '<poly>', '</poly>', '<and>']
        }
        tokenizer.add_special_tokens(tokens_to_add)

        # Maps each task token to the post-processing routine its raw
        # generation output requires (consumed in `post_process_generation`).
        self.tasks_answer_post_processing_type = {
            '<OCR>': 'pure_text',
            '<OCR_WITH_REGION>': 'ocr',
            '<CAPTION>': 'pure_text',
            '<DETAILED_CAPTION>': 'pure_text',
            '<MORE_DETAILED_CAPTION>': 'pure_text',
            '<OD>': 'description_with_bboxes',
            '<DENSE_REGION_CAPTION>': 'description_with_bboxes',
            '<CAPTION_TO_PHRASE_GROUNDING>': "phrase_grounding",
            '<REFERRING_EXPRESSION_SEGMENTATION>': 'polygons',
            '<REGION_TO_SEGMENTATION>': 'polygons',
            '<OPEN_VOCABULARY_DETECTION>': 'description_with_bboxes_or_polygons',
            '<REGION_TO_CATEGORY>': 'pure_text',
            '<REGION_TO_DESCRIPTION>': 'pure_text',
            '<REGION_TO_OCR>': 'pure_text',
            '<REGION_PROPOSAL>': 'bboxes'
        }

        # Tasks whose prompt is fixed: the task token must be the entire input.
        self.task_prompts_without_inputs = {
            '<OCR>': 'What is the text in the image?',
            '<OCR_WITH_REGION>': 'What is the text in the image, with regions?',
            '<CAPTION>': 'What does the image describe?',
            '<DETAILED_CAPTION>': 'Describe in detail what is shown in the image.',
            '<MORE_DETAILED_CAPTION>': 'Describe with a paragraph what is shown in the image.',
            '<OD>': 'Locate the objects with category name in the image.',
            '<DENSE_REGION_CAPTION>': 'Locate the objects in the image, with their descriptions.',
            '<REGION_PROPOSAL>': 'Locate the region proposals in the image.'
        }

        # Tasks whose prompt template has an '{input}' slot filled with the
        # user text that accompanies the task token.
        self.task_prompts_with_input = {
            '<CAPTION_TO_PHRASE_GROUNDING>': "Locate the phrases in the caption: {input}",
            '<REFERRING_EXPRESSION_SEGMENTATION>': 'Locate {input} in the image with mask',
            '<REGION_TO_SEGMENTATION>': 'What is the polygon mask of region {input}',
            '<OPEN_VOCABULARY_DETECTION>': 'Locate {input} in the image.',
            '<REGION_TO_CATEGORY>': 'What is the region {input}?',
            '<REGION_TO_DESCRIPTION>': 'What does the region {input} describe?',
            '<REGION_TO_OCR>': 'What text is in the region {input}?',
        }

        self.post_processor = Florence2PostProcesser(tokenizer=tokenizer)


        super().__init__(image_processor, tokenizer)

    def _construct_prompts(self, text):
        """Rewrite each entry of *text*, replacing a task token with its
        natural-language task prompt (filling '{input}' where applicable)."""
        # replace the task tokens with the task prompts if task token is in the text
        prompts = []
        for _text in text:
            # 1. fixed task prompts without additional inputs
            for task_token, task_prompt in self.task_prompts_without_inputs.items():
                if task_token in _text:
                    assert _text == task_token, f"Task token {task_token} should be the only token in the text."
                    _text = task_prompt
                    break
            # 2. task prompts with additional inputs
            for task_token, task_prompt in self.task_prompts_with_input.items():
                if task_token in _text:
                    _text = task_prompt.format(input=_text.replace(task_token, ''))
                    break
            prompts.append(_text)
        return prompts

    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        images: ImageInput = None,
        tokenize_newline_separately: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length=None,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
        do_resize: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional["ChannelDimension"] = "channels_first",  # noqa: F821
        input_data_format: Optional[
            Union[str, "ChannelDimension"]  # noqa: F821
        ] = None,
        resample: "PILImageResampling" = None,  # noqa: F821
        do_convert_rgb: bool = None,
        do_thumbnail: bool = None,
        do_align_long_axis: bool = None,
        do_rescale: bool = None,
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to BartTokenizerFast's [`~BartTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
                number of channels, H and W are image height and width.
            tokenize_newline_separately (`bool`, defaults to `True`):
                Adds a separately tokenized '\n' at the end of the prompt.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`, *optional*):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix`
              is provided, the `input_ids` will also contain the suffix input ids.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **labels** -- Labels compatible with training if `suffix` is not None
        """

        return_token_type_ids = False

        if images is None:
            raise ValueError("`images` are expected as arguments to a `Florence2Processor` instance.")
        if text is None:
            # NOTE(review): `logger` here is a stdlib `logging.Logger`, which
            # has no `warning_once` method — this branch looks like it would
            # raise AttributeError; confirm whether the transformers logger
            # (`transformers.utils.logging.get_logger`) was intended.
            logger.warning_once(
                "You are using Florence-2 without a text prompt."
            )
            text = ""

        if isinstance(text, List) and isinstance(images, List):
            if len(images) < len(text):
                raise ValueError(
                    f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image."
                )
        # Normalize a single prompt to a one-element batch.
        if _is_str_or_image(text):
            text = [text]
        elif isinstance(text, list) and _is_str_or_image(text[0]):
            pass

        pixel_values = self.image_processor(
            images,
            do_resize=do_resize,
            do_normalize=do_normalize,
            return_tensors=return_tensors,
            image_mean=image_mean,
            image_std=image_std,
            input_data_format=input_data_format,
            data_format=data_format,
            resample=resample,
            do_convert_rgb=do_convert_rgb,
        )["pixel_values"]

        if max_length is not None:
            max_length -= self.image_seq_length  # max_length has to account for the image tokens

        text = self._construct_prompts(text)

        inputs = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            return_token_type_ids=return_token_type_ids,
        )

        return_data = {**inputs, "pixel_values": pixel_values}

        # `return_token_type_ids` is hard-coded False above, so this labels
        # branch is currently dead code kept for parity with the upstream API.
        if return_token_type_ids:
            labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
            return_data.update({"labels": labels})
        return BatchFeature(data=return_data)

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Florence2
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Florence2
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BartTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->Florence2
    def model_input_names(self):
        # Union of both sub-components' input names, deduplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def post_process_generation(self, text=None, sequence=None, transition_beam_score=None, task=None, image_size=None):
        """
        Post-process the output of the model to each of the task outputs.

        Args:
            text (`str`): The text to post-process.
            task (`str`): The task to post-process the text for.
            image_size (`Tuple[int, int]`): The size of the image. height x width.
        """

        # Unknown tasks fall back to plain-text handling.
        task_answer_post_processing_type = self.tasks_answer_post_processing_type.get(task, 'pure_text')
        task_answer = self.post_processor(
            text=text,
            sequence=sequence,
            transition_beam_score=transition_beam_score,
            image_size=image_size,
            parse_tasks=task_answer_post_processing_type,
        )[task_answer_post_processing_type]

        # Reshape the parsed instances into the task-specific answer dict.
        if task_answer_post_processing_type == 'pure_text':
            final_answer = task_answer
            # remove the special tokens
            final_answer = final_answer.replace('<s>', '').replace('</s>', '')
        elif task_answer_post_processing_type in ['od', 'description_with_bboxes', 'bboxes']:
            od_instances = task_answer
            bboxes_od = [_od_instance['bbox'] for _od_instance in od_instances]
            labels_od = [str(_od_instance['cat_name']) for _od_instance in od_instances]
            final_answer = {'bboxes': bboxes_od, 'labels': labels_od}
            if len(od_instances) and 'score' in od_instances[0]:
                scores_od = [_od_instance['score'] for _od_instance in od_instances]
                final_answer['scores'] = scores_od
        elif task_answer_post_processing_type in ['ocr']:
            bboxes = [_od_instance['quad_box'] for _od_instance in task_answer]
            labels = [str(_od_instance['text']) for _od_instance in task_answer]
            final_answer = {'quad_boxes': bboxes, 'labels': labels}
        elif task_answer_post_processing_type in ['phrase_grounding']:
            # Flatten: one (bbox, label) pair per grounded box, repeating the
            # phrase label for each of its boxes.
            bboxes = []
            labels = []
            for _grounded_phrase in task_answer:
                for _bbox in _grounded_phrase['bbox']:
                    bboxes.append(_bbox)
                    labels.append(_grounded_phrase['cat_name'])
            final_answer = {'bboxes': bboxes, 'labels': labels}
        elif task_answer_post_processing_type in ['description_with_polygons', 'polygons']:
            labels = []
            polygons = []
            for result in task_answer:
                label = result['cat_name']
                _polygons = result['polygons']
                labels.append(label)
                polygons.append(_polygons)
            final_answer = {'polygons': polygons, 'labels': labels}
        elif task_answer_post_processing_type in ['description_with_bboxes_or_polygons']:
            # Instances may carry either a polygon or a bbox; route each to
            # its own list with a parallel label list.
            bboxes = []
            bboxes_labels = []
            polygons = []
            polygons_labels = []
            for result in task_answer:
                label = result['cat_name']
                if 'polygons' in result:
                    _polygons = result['polygons']
                    polygons.append(_polygons)
                    polygons_labels.append(label)
                else:
                    _bbox = result['bbox']
                    bboxes.append(_bbox)
                    bboxes_labels.append(label)
            final_answer = {'bboxes': bboxes, 'bboxes_labels': bboxes_labels, 'polygons': polygons, 'polygons_labels': polygons_labels}
        else:
            raise ValueError('Unknown task answer post processing type: {}'.format(task_answer_post_processing_type))

        final_answer = {
            task: final_answer}
        return final_answer
383
+
384
class BoxQuantizer(object):
    """Converts pixel-space bounding boxes to discrete bin indices and back.

    `quantize` maps (xmin, ymin, xmax, ymax) pixel coordinates onto integer
    bin indices in [0, bins-1]; `dequantize` maps indices back to the pixel
    coordinate at the center of each bin.  Only the 'floor' mode is
    implemented; 'round' is reserved and any other mode is rejected.
    """

    def __init__(self, mode, bins):
        self.mode = mode
        self.bins = bins  # (bins_w, bins_h)

    def quantize(self, boxes: torch.Tensor, size):
        """Quantize boxes of shape [N, 4] against an image of `size` = (w, h)."""
        bins_w, bins_h = self.bins
        size_w, size_h = size
        bin_w = size_w / bins_w  # pixel extent of one horizontal bin
        bin_h = size_h / bins_h  # pixel extent of one vertical bin

        xmin, ymin, xmax, ymax = boxes.split(1, dim=-1)

        if self.mode == 'floor':
            quantized = [
                (coord / extent).floor().clamp(0, limit)
                for coord, extent, limit in (
                    (xmin, bin_w, bins_w - 1),
                    (ymin, bin_h, bins_h - 1),
                    (xmax, bin_w, bins_w - 1),
                    (ymax, bin_h, bins_h - 1),
                )
            ]
        elif self.mode == 'round':
            raise NotImplementedError()
        else:
            raise ValueError('Incorrect quantization type.')

        return torch.cat(quantized, dim=-1).int()

    def dequantize(self, boxes: torch.Tensor, size):
        """Map bin indices of shape [N, 4] back to pixel coordinates (bin centers)."""
        bins_w, bins_h = self.bins
        size_w, size_h = size
        bin_w = size_w / bins_w
        bin_h = size_h / bins_h

        xmin, ymin, xmax, ymax = boxes.split(1, dim=-1)

        if self.mode == 'floor':
            # Offset by half a bin so the result lands on the bin center.
            centers = [
                (index + 0.5) * extent
                for index, extent in (
                    (xmin, bin_w),
                    (ymin, bin_h),
                    (xmax, bin_w),
                    (ymax, bin_h),
                )
            ]
        elif self.mode == 'round':
            raise NotImplementedError()
        else:
            raise ValueError('Incorrect quantization type.')

        return torch.cat(centers, dim=-1)
444
+
445
+
446
class CoordinatesQuantizer(object):
    """Convert ``(N, 2)`` point coordinates between pixels and discrete bin indices.

    Args:
        mode: quantization strategy; only ``'floor'`` is implemented.
        bins: ``(bins_w, bins_h)`` — number of discrete bins per axis.
    """

    def __init__(self, mode, bins):
        self.mode = mode
        self.bins = bins

    def quantize(self, coordinates: torch.Tensor, size):
        """Map continuous ``(N, 2)`` points to integer bin indices."""
        bins_w, bins_h = self.bins
        size_w, size_h = size
        per_bin_w = size_w / bins_w
        per_bin_h = size_h / bins_h
        assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
        x, y = coordinates.split(1, dim=-1)

        if self.mode == 'floor':
            quantized = torch.cat(
                (
                    (x / per_bin_w).floor().clamp(0, bins_w - 1),
                    (y / per_bin_h).floor().clamp(0, bins_h - 1),
                ),
                dim=-1,
            ).int()
        elif self.mode == 'round':
            raise NotImplementedError()
        else:
            raise ValueError('Incorrect quantization type.')

        return quantized

    def dequantize(self, coordinates: torch.Tensor, size):
        """Map integer bin indices back to pixel coordinates (bin centers)."""
        bins_w, bins_h = self.bins
        size_w, size_h = size
        per_bin_w = size_w / bins_w
        per_bin_h = size_h / bins_h
        assert coordinates.shape[-1] == 2, 'coordinates should be shape (N, 2)'
        x, y = coordinates.split(1, dim=-1)

        if self.mode == 'floor':
            # Add 0.5 so the dequantized value sits at the center of the bin.
            dequantized = torch.cat(
                (
                    (x + 0.5) * per_bin_w,
                    (y + 0.5) * per_bin_h,
                ),
                dim=-1,
            )
        elif self.mode == 'round':
            raise NotImplementedError()
        else:
            raise ValueError('Incorrect quantization type.')

        return dequantized
+
504
+
505
class Florence2PostProcesser(object):
    r"""
    Florence-2 post process for converting text prediction to various tasks results.

    Args:
        config: A dict of configs.
        tokenizer: A tokenizer for decoding text to spans.
        sample config:
            UNIFIED_POST_PROCESS:
                # common configs
                NUM_BBOX_HEIGHT_BINS: 1000
                NUM_BBOX_WIDTH_BINS: 1000
                COORDINATES_HEIGHT_BINS: 1000
                COORDINATES_WIDTH_BINS: 1000
                # task specific configs, override the common configs
                PARSE_TASKS:
                    - TASK_NAME: 'video_dense_caption'
                      PATTERN: 'r<time_(\d+)><time_(\d+)>([a-zA-Z0-9 ]+)'
                      SCORE_MODE: 'avg_cat_name_scores'
                      NUM_BINS: 100
                    - TASK_NAME: 'od'
                      PATTERN: 'r<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>([a-zA-Z0-9 ]+)'
                      SCORE_MODE: 'avg_cat_name_scores'

    Returns:
        parsed_dict (dict): A dict of parsed results.
    """
    def __init__(
        self,
        tokenizer=None
    ):
        # Collect the task names and their per-task configs from the default config.
        parse_tasks = []
        parse_task_configs = {}
        config = self._create_default_config()
        for task in config['PARSE_TASKS']:
            parse_tasks.append(task['TASK_NAME'])
            parse_task_configs[task['TASK_NAME']] = task

        self.config = config
        self.parse_tasks = parse_tasks
        self.parse_tasks_configs = parse_task_configs

        self.tokenizer = tokenizer
        # NOTE(review): ``all_special_tokens`` is only defined when a tokenizer is
        # supplied; sequence-based decoding (``decode_with_spans``) relies on it,
        # so calling ``__call__`` with ``sequence=`` and no tokenizer will fail.
        if self.tokenizer is not None:
            self.all_special_tokens = set(self.tokenizer.all_special_tokens)

        # Build the box / coordinate quantizers and the phrase-grounding filter.
        self.init_quantizers()
        self.black_list_of_phrase_grounding = self._create_black_list_of_phrase_grounding()
554
+ def _create_black_list_of_phrase_grounding(self):
555
+ black_list = {}
556
+
557
+ if 'phrase_grounding' in self.parse_tasks and self.parse_tasks_configs['phrase_grounding']['FILTER_BY_BLACK_LIST']:
558
+ black_list = set(
559
+ ['it', 'I', 'me', 'mine',
560
+ 'you', 'your', 'yours',
561
+ 'he', 'him', 'his',
562
+ 'she', 'her', 'hers',
563
+ 'they', 'them', 'their', 'theirs',
564
+ 'one', 'oneself',
565
+ 'we', 'us', 'our', 'ours',
566
+ 'you', 'your', 'yours',
567
+ 'they', 'them', 'their', 'theirs',
568
+ 'mine', 'yours', 'his', 'hers', 'its',
569
+ 'ours', 'yours', 'theirs',
570
+ 'myself', 'yourself', 'himself', 'herself', 'itself',
571
+ 'ourselves', 'yourselves', 'themselves',
572
+ 'this', 'that',
573
+ 'these', 'those',
574
+ 'who', 'whom', 'whose', 'which', 'what',
575
+ 'who', 'whom', 'whose', 'which', 'that',
576
+ 'all', 'another', 'any', 'anybody', 'anyone', 'anything',
577
+ 'each', 'everybody', 'everyone', 'everything',
578
+ 'few', 'many', 'nobody', 'none', 'one', 'several',
579
+ 'some', 'somebody', 'someone', 'something',
580
+ 'each other', 'one another',
581
+ 'myself', 'yourself', 'himself', 'herself', 'itself',
582
+ 'ourselves', 'yourselves', 'themselves',
583
+ 'the image', 'image', 'images', 'the', 'a', 'an', 'a group',
584
+ 'other objects', 'lots', 'a set',
585
+ ]
586
+ )
587
+
588
+ return black_list
589
+
590
+ def _create_default_config(self):
591
+ config = {
592
+ 'NUM_BBOX_HEIGHT_BINS': 1000,
593
+ 'NUM_BBOX_WIDTH_BINS': 1000,
594
+ 'BOX_QUANTIZATION_MODE': 'floor',
595
+ 'COORDINATES_HEIGHT_BINS': 1000,
596
+ 'COORDINATES_WIDTH_BINS': 1000,
597
+ 'COORDINATES_QUANTIZATION_MODE': 'floor',
598
+ 'PARSE_TASKS': [
599
+ {
600
+ 'TASK_NAME': 'od',
601
+ 'PATTERN': r'([a-zA-Z0-9 ]+)<loc_(\\d+)><loc_(\\d+)><loc_(\\d+)><loc_(\\d+)>',
602
+ 'SCORE_MODE': 'avg_loc_scores'
603
+ },
604
+ {
605
+ 'TASK_NAME': 'ocr',
606
+ 'PATTERN': r'(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>',
607
+ 'AREA_THRESHOLD': 0.00
608
+ },
609
+ {
610
+ 'TASK_NAME': 'phrase_grounding',
611
+ 'FILTER_BY_BLACK_LIST': True
612
+ },
613
+ {
614
+ 'TASK_NAME': 'pure_text',
615
+ },
616
+ {
617
+ 'TASK_NAME': 'description_with_bboxes',
618
+ 'SCORE_MODE': 'avg_loc_scores'
619
+ },
620
+ {
621
+ 'TASK_NAME': 'description_with_polygons',
622
+ },
623
+ {
624
+ 'TASK_NAME': 'polygons',
625
+ },
626
+ {
627
+ 'TASK_NAME': 'bboxes',
628
+ },
629
+ {
630
+ 'TASK_NAME': 'description_with_bboxes_or_polygons',
631
+ }
632
+ ]
633
+ }
634
+
635
+ return config
636
+
637
+ def init_quantizers(self):
638
+ # we have box_quantizer (od, grounding) and coordinates_quantizer (ocr, referring_segmentation)
639
+ num_bbox_height_bins = self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
640
+ num_bbox_width_bins = self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
641
+ box_quantization_mode = self.config.get('BOX_QUANTIZATION_MODE', 'floor')
642
+ self.box_quantizer = BoxQuantizer(
643
+ box_quantization_mode,
644
+ (num_bbox_width_bins, num_bbox_height_bins),
645
+ )
646
+
647
+ num_bbox_height_bins = self.config['COORDINATES_HEIGHT_BINS'] if 'COORDINATES_HEIGHT_BINS' in self.config else self.config.get('NUM_BBOX_HEIGHT_BINS', 1000)
648
+ num_bbox_width_bins = self.config['COORDINATES_WIDTH_BINS'] if 'COORDINATES_WIDTH_BINS' in self.config else self.config.get('NUM_BBOX_WIDTH_BINS', 1000)
649
+ box_quantization_mode = self.config.get('COORDINATES_QUANTIZATION_MODE') if 'COORDINATES_QUANTIZATION_MODE' in self.config else self.config.get('BOX_QUANTIZATION_MODE', 'floor')
650
+ self.coordinates_quantizer = CoordinatesQuantizer(
651
+ box_quantization_mode,
652
+ (num_bbox_width_bins, num_bbox_height_bins),
653
+ )
654
+
655
    def decode_with_spans(self, tokenizer, token_ids):
        """Decode token ids to text while recording each token's character span.

        Args:
            tokenizer: the tokenizer used for decoding; must be a BART tokenizer
                (BartTokenizer / BartTokenizerFast — presumably imported earlier
                in this file; not visible in this chunk).
            token_ids: sequence of token ids to decode.

        Returns:
            (text, spans): the concatenated decoded string and a list of
            ``[start, end)`` character intervals, one per input token.
        """
        filtered_tokens = tokenizer.convert_ids_to_tokens(
            token_ids, skip_special_tokens=False)
        assert len(filtered_tokens) == len(token_ids)

        sub_texts = []
        for token in filtered_tokens:
            if token in self.all_special_tokens:
                # Special tokens (e.g. '<loc_12>') are kept verbatim so that
                # downstream regexes can find them in the decoded text.
                sub_texts.append(token)
            else:
                if isinstance(tokenizer, (BartTokenizer, BartTokenizerFast)):
                    sub_text = tokenizer.convert_tokens_to_string([token])
                else:
                    raise ValueError(f'type {type(tokenizer)} not supported')
                sub_texts.append(sub_text)

        text = ''
        spans = []
        for sub_text in sub_texts:
            span = (len(text), len(text) + len(sub_text))  # [start index, end index).
            text += sub_text
            spans.append(span)

        return text, spans
+
680
+ def parse_od_from_text_and_spans(
681
+ self,
682
+ text,
683
+ pattern,
684
+ image_size,
685
+ phrase_centric=False
686
+ ):
687
+ parsed = list(re.finditer(pattern, text))
688
+
689
+ instances = []
690
+ for i in range(len(parsed)):
691
+ # Prepare instance.
692
+ instance = {}
693
+
694
+ if phrase_centric:
695
+ bbox_bins = [int(parsed[i].group(j)) for j in range(2, 6)]
696
+ else:
697
+ bbox_bins = [int(parsed[i].group(j)) for j in range(1, 5)]
698
+ instance['bbox'] = self.box_quantizer.dequantize(
699
+ boxes=torch.tensor(bbox_bins),
700
+ size=image_size
701
+ ).tolist()
702
+
703
+ if phrase_centric:
704
+ instance['cat_name'] = parsed[i].group(1).lower().strip()
705
+ else:
706
+ instance['cat_name'] = parsed[i].group(5).lower().strip()
707
+ instances.append(instance)
708
+
709
+ return instances
710
+
711
    def parse_ocr_from_text_and_spans(self,
                                      text,
                                      pattern,
                                      image_size,
                                      area_threshold=-1.0,
                                      ):
        """Parse OCR-with-regions output into ``[{'quad_box', 'text'}]``.

        Args:
            text: decoded model output containing '<text><loc_..>x8' runs.
            pattern: regex whose group 1 is the text and groups 2-9 are the
                eight quantized quad-box coordinates.
            image_size: (width, height) used to dequantize coordinates.
            area_threshold: fraction of the image area; quads smaller than
                this are dropped (disabled when <= 0).
        """
        # NOTE(review): ``bboxes``/``labels`` are accumulated but never
        # returned — only ``instances`` is used by the caller.
        bboxes = []
        labels = []
        text = text.replace('<s>', '')
        # ocr with regions
        parsed = re.findall(pattern, text)
        instances = []
        image_width, image_height = image_size

        for ocr_line in parsed:
            ocr_content = ocr_line[0]
            quad_box = ocr_line[1:]
            quad_box = [int(i) for i in quad_box]
            # Dequantize the 8 bin indices as four (x, y) points, then flatten.
            quad_box = self.coordinates_quantizer.dequantize(
                torch.tensor(np.array(quad_box).reshape(-1, 2)),
                size=image_size
            ).reshape(-1).tolist()

            if area_threshold > 0:
                x_coords = [i for i in quad_box[0::2]]
                y_coords = [i for i in quad_box[1::2]]

                # apply the Shoelace formula
                area = 0.5 * abs(sum(x_coords[i] * y_coords[i + 1] - x_coords[i + 1] * y_coords[i] for i in range(4 - 1)))

                if area < (image_width * image_height) * area_threshold:
                    continue

            bboxes.append(quad_box)
            labels.append(ocr_content)
            instances.append({
                'quad_box': quad_box,
                'text': ocr_content,
            })
        return instances
+
752
+ def parse_phrase_grounding_from_text_and_spans(self, text, pattern, image_size):
753
+ # ignore <s> </s> and <pad>
754
+ cur_span = 0
755
+ if text.startswith('<s>'):
756
+ cur_span += 3
757
+
758
+ text = text.replace('<s>', '')
759
+ text = text.replace('</s>', '')
760
+ text = text.replace('<pad>', '')
761
+
762
+ pattern = r"([^<]+(?:<loc_\d+>){4,})"
763
+ phrases = re.findall(pattern, text)
764
+
765
+ # pattern should be text pattern and od pattern
766
+ pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
767
+ box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
768
+
769
+ instances = []
770
+ for pharse_text in phrases:
771
+ phrase_text_strip = pharse_text.replace('<ground>', '', 1)
772
+ phrase_text_strip = pharse_text.replace('<obj>', '', 1)
773
+
774
+ if phrase_text_strip == '':
775
+ cur_span += len(pharse_text)
776
+ continue
777
+
778
+ # Prepare instance.
779
+ instance = {}
780
+
781
+ # parse phrase, get string
782
+ phrase = re.search(pattern, phrase_text_strip)
783
+ if phrase is None:
784
+ cur_span += len(pharse_text)
785
+ continue
786
+
787
+ # parse bboxes by box_pattern
788
+ bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
789
+ if len(bboxes_parsed) == 0:
790
+ cur_span += len(pharse_text)
791
+ continue
792
+
793
+ phrase = phrase.group()
794
+ # remove leading and trailing spaces
795
+ phrase = phrase.strip()
796
+
797
+ if phrase in self.black_list_of_phrase_grounding:
798
+ cur_span += len(pharse_text)
799
+ continue
800
+
801
+ # a list of list
802
+ bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
803
+ instance['bbox'] = self.box_quantizer.dequantize(
804
+ boxes=torch.tensor(bbox_bins),
805
+ size=image_size
806
+ ).tolist()
807
+
808
+ # exclude non-ascii characters
809
+ phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
810
+ instance['cat_name'] = phrase
811
+
812
+ instances.append(instance)
813
+
814
+ return instances
815
+
816
+ def parse_description_with_bboxes_from_text_and_spans(
817
+ self,
818
+ text,
819
+ spans=None,
820
+ scores=None,
821
+ score_mode=None,
822
+ pattern=None,
823
+ image_size=None,
824
+ allow_empty_phrase=False
825
+ ):
826
+ def find_matched_token_indices(cur_span, token_spans):
827
+ inds = []
828
+ for i, token_span in enumerate(token_spans):
829
+ if not (token_span[1] <= cur_span[0] or token_span[0] >= cur_span[1]):
830
+ inds.append(i)
831
+ return inds
832
+
833
+ cur_span = 0
834
+ if text.startswith('<s>'):
835
+ cur_span += 3
836
+
837
+ text = text.replace('<s>', '')
838
+ text = text.replace('</s>', '')
839
+ text = text.replace('<pad>', '')
840
+
841
+ if allow_empty_phrase:
842
+ pattern = rf"(?:(?:<loc_\d+>){{4,}})"
843
+ else:
844
+ pattern = r"([^<]+(?:<loc_\d+>){4,})"
845
+ phrases = re.findall(pattern, text)
846
+
847
+ # pattern should be text pattern and od pattern
848
+ pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_)'
849
+ box_pattern = r'<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>'
850
+
851
+ instances = []
852
+ for pharse_text in phrases:
853
+ phrase_text_strip = pharse_text.replace('<ground>', '', 1)
854
+ phrase_text_strip = pharse_text.replace('<obj>', '', 1)
855
+
856
+ if phrase_text_strip == '' and not allow_empty_phrase:
857
+ cur_span += len(pharse_text)
858
+ continue
859
+
860
+ # parse phrase, get string
861
+ phrase = re.search(pattern, phrase_text_strip)
862
+ if phrase is None:
863
+ cur_span += len(pharse_text)
864
+ continue
865
+
866
+ phrase_span = phrase.span()
867
+ phrase = phrase.group()
868
+ # remove leading and trailing spaces
869
+ phrase = phrase.strip()
870
+
871
+ # parse bboxes by box_pattern
872
+ bboxes_parsed = list(re.finditer(box_pattern, pharse_text))
873
+ if len(bboxes_parsed) == 0:
874
+ cur_span += len(pharse_text)
875
+ continue
876
+
877
+ # a list of list
878
+ bbox_bins = [[int(_bboxes_parsed.group(j)) for j in range(1, 5)] for _bboxes_parsed in bboxes_parsed]
879
+
880
+ bboxes = self.box_quantizer.dequantize(
881
+ boxes=torch.tensor(bbox_bins),
882
+ size=image_size
883
+ ).tolist()
884
+
885
+ if score_mode == 'avg_loc_scores':
886
+ if spans is None or scores is None:
887
+ all_scores = None
888
+ else:
889
+ bbox_end_spans = [_bboxes_parsed.span(0) for _bboxes_parsed in bboxes_parsed]
890
+ all_scores = []
891
+ for _spans in bbox_end_spans:
892
+ token_inds = find_matched_token_indices((_spans[0] + cur_span, _spans[1]+ cur_span), spans)
893
+ loc_scores = [scores[token_i] for token_i in token_inds]
894
+ score = sum(loc_scores) / len(loc_scores)
895
+ all_scores.append(score)
896
+ elif score_mode == 'avg_cat_name_scores':
897
+ if spans is None or scores is None:
898
+ all_scores = None
899
+ else:
900
+ cat_name_token_inds = find_matched_token_indices((phrase_span[0] + cur_span, phrase_span[1]+cur_span), spans)
901
+ cat_name_scores = [scores[token_i] for token_i in cat_name_token_inds]
902
+ score = sum(cat_name_scores) / len(cat_name_scores)
903
+ all_scores = [score] * len(bboxes)
904
+ elif score_mode is None:
905
+ all_scores = None
906
+ else:
907
+ raise ValueError('Unknown score mode: {}'.format(score_mode))
908
+
909
+ phrase = phrase.encode('ascii',errors='ignore').decode('ascii')
910
+ for _idx, _bboxes in enumerate(bboxes):
911
+ # Prepare instance.
912
+ instance = {}
913
+ instance['bbox'] = _bboxes
914
+ # exclude non-ascii characters
915
+ instance['cat_name'] = phrase
916
+ if all_scores is not None:
917
+ instance['score'] = math.exp(all_scores[_idx])
918
+ instances.append(instance)
919
+
920
+ cur_span += len(pharse_text)
921
+
922
+ return instances
923
+
924
    def parse_description_with_polygons_from_text_and_spans(self, text, pattern, image_size,
                                                            allow_empty_phrase=False,
                                                            polygon_sep_token='<sep>',
                                                            polygon_start_token='<poly>',
                                                            polygon_end_token='</poly>',
                                                            with_box_at_start=False,
                                                            ):
        """Parse phrase + polygon output into ``[{'cat_name', 'polygons'[, 'bbox']}]``.

        ``polygons`` is a list of flat ``[x1, y1, x2, y2, ...]`` lists. When
        ``with_box_at_start`` is True, the first four bins of the first polygon
        run are treated as a bbox instead.

        Args:
            text: decoded model output.
            pattern: unused — the regexes are rebuilt locally below.
            image_size: (width, height) used to dequantize coordinates.
            allow_empty_phrase: accept token runs with no preceding phrase.
            polygon_sep_token/start/end: markers separating polygons/instances.
        """

        # ref_seg format: '<expression><x1><y1><x2><y2><><><sep><><><><>'
        # ignore <s> </s> and <pad>

        text = text.replace('<s>', '')
        text = text.replace('</s>', '')
        text = text.replace('<pad>', '')

        if allow_empty_phrase:
            pattern = rf"(?:(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
        else:
            # [^<]+: This part matches one or more characters that are not the < symbol.
            # The ^ inside the square brackets [] is a negation, meaning it matches anything except <.
            #
            pattern = rf"([^<]+(?:<loc_\d+>|{re.escape(polygon_sep_token)}|{re.escape(polygon_start_token)}|{re.escape(polygon_end_token)}){{4,}})"
        phrases = re.findall(pattern, text)

        phrase_string_pattern = r'^\s*(.*?)(?=<od>|</od>|<box>|</box>|<bbox>|</bbox>|<loc_|<poly>)'
        box_pattern = rf'((?:<loc_\d+>)+)(?:{re.escape(polygon_sep_token)}|$)'

        # one polygons instance is separated by polygon_start_token and polygon_end_token
        polygons_instance_pattern = rf'{re.escape(polygon_start_token)}(.*?){re.escape(polygon_end_token)}'

        instances = []
        for phrase_text in phrases:

            # exclude loc_\d+>
            # need to get span if want to include category score
            phrase_text_strip = re.sub(r'^loc_\d+>', '', phrase_text, count=1)

            if phrase_text_strip == '' and not allow_empty_phrase:
                continue

            # parse phrase, get string
            phrase = re.search(phrase_string_pattern, phrase_text_strip)
            if phrase is None:
                continue
            phrase = phrase.group()
            # remove leading and trailing spaces
            phrase = phrase.strip()

            # split by polygon_start_token and polygon_end_token first using polygons_instance_pattern
            if polygon_start_token in phrase_text and polygon_end_token in phrase_text:
                polygons_instances_parsed = list(re.finditer(polygons_instance_pattern, phrase_text))
            else:
                # No explicit instance markers: the whole run is one instance
                # (kept as a plain string; handled by the isinstance check below).
                polygons_instances_parsed = [phrase_text]

            for _polygons_instances_parsed in polygons_instances_parsed:
                # Prepare instance.
                instance = {}

                if isinstance(_polygons_instances_parsed, str):
                    polygons_parsed = list(re.finditer(box_pattern, _polygons_instances_parsed))
                else:
                    polygons_parsed = list(re.finditer(box_pattern, _polygons_instances_parsed.group(1)))
                if len(polygons_parsed) == 0:
                    continue

                # a list of list (polygon)
                bbox = []
                polygons = []
                for _polygon_parsed in polygons_parsed:
                    # group 1: whole <loc_\d+>...</loc_\d+>
                    _polygon = _polygon_parsed.group(1)
                    # parse into list of int
                    _polygon = [int(_loc_parsed.group(1)) for _loc_parsed in re.finditer(r'<loc_(\d+)>', _polygon)]
                    if with_box_at_start and len(bbox) == 0:
                        if len(_polygon) > 4:
                            # no valid bbox prediction
                            bbox = _polygon[:4]
                            _polygon = _polygon[4:]
                        else:
                            bbox = [0, 0, 0, 0]
                    # abandon last element if is not paired
                    if len(_polygon) % 2 == 1:
                        _polygon = _polygon[:-1]

                    # reshape into (n, 2)
                    _polygon = self.coordinates_quantizer.dequantize(
                        torch.tensor(np.array(_polygon).reshape(-1, 2)),
                        size=image_size
                    ).reshape(-1).tolist()
                    # reshape back
                    polygons.append(_polygon)

                instance['cat_name'] = phrase
                instance['polygons'] = polygons
                if len(bbox) != 0:
                    instance['bbox'] = self.box_quantizer.dequantize(
                        boxes=torch.tensor([bbox]),
                        size=image_size
                    ).tolist()[0]

                instances.append(instance)

        return instances
+
1035
+ def __call__(
1036
+ self,
1037
+ text=None,
1038
+ sequence=None,
1039
+ transition_beam_score=None,
1040
+ image_size=None,
1041
+ parse_tasks=None,
1042
+ ):
1043
+ """
1044
+ Args:
1045
+ text: model outputs
1046
+ image_size: (width, height)
1047
+ parse_tasks: a list of tasks to parse, if None, parse all tasks.
1048
+ """
1049
+ if parse_tasks is not None:
1050
+ if isinstance(parse_tasks, str):
1051
+ parse_tasks = [parse_tasks]
1052
+ for _parse_task in parse_tasks:
1053
+ assert _parse_task in self.parse_tasks, f'parse task {_parse_task} not supported'
1054
+
1055
+ # sequence or text should be provided
1056
+ assert sequence is not None or text is not None, 'sequence or text should be provided'
1057
+ assert sequence is None or text is None, 'only one of sequence and text should be provided'
1058
+
1059
+ if sequence is not None:
1060
+ sequence = sequence.tolist()[1:]
1061
+ text, spans = self.decode_with_spans(self.tokenizer, sequence)
1062
+ if transition_beam_score is not None:
1063
+ transition_beam_score = transition_beam_score.tolist()
1064
+ assert len(sequence) == len(transition_beam_score)
1065
+ else:
1066
+ spans = None
1067
+ transition_beam_score = None
1068
+
1069
+ parsed_dict = {
1070
+ 'text': text
1071
+ }
1072
+
1073
+ for task in self.parse_tasks:
1074
+ if parse_tasks is not None and task not in parse_tasks:
1075
+ continue
1076
+
1077
+ pattern = self.parse_tasks_configs[task].get('PATTERN', None)
1078
+ score_mode = self.parse_tasks_configs[task].get('SCORE_MODE', None)
1079
+
1080
+ if task == 'ocr':
1081
+ instances = self.parse_ocr_from_text_and_spans(
1082
+ text,
1083
+ pattern=pattern,
1084
+ image_size=image_size,
1085
+ area_threshold=self.parse_tasks_configs[task].get('AREA_THRESHOLD', 0.0),
1086
+ )
1087
+ parsed_dict['ocr'] = instances
1088
+ elif task == 'phrase_grounding':
1089
+ instances = self.parse_phrase_grounding_from_text_and_spans(
1090
+ text,
1091
+ pattern=pattern,
1092
+ image_size=image_size,
1093
+ )
1094
+ parsed_dict['phrase_grounding'] = instances
1095
+ elif task == 'pure_text':
1096
+ parsed_dict['pure_text'] = text
1097
+ elif task == 'description_with_bboxes':
1098
+ instances = self.parse_description_with_bboxes_from_text_and_spans(
1099
+ text,
1100
+ spans=spans,
1101
+ scores=transition_beam_score,
1102
+ score_mode=score_mode,
1103
+ pattern=pattern,
1104
+ image_size=image_size,
1105
+ )
1106
+ parsed_dict['description_with_bboxes'] = instances
1107
+ elif task == 'description_with_polygons':
1108
+ instances = self.parse_description_with_polygons_from_text_and_spans(
1109
+ text,
1110
+ pattern=pattern,
1111
+ image_size=image_size,
1112
+ )
1113
+ parsed_dict['description_with_polygons'] = instances
1114
+ elif task == 'polygons':
1115
+ instances = self.parse_description_with_polygons_from_text_and_spans(
1116
+ text,
1117
+ pattern=pattern,
1118
+ image_size=image_size,
1119
+ allow_empty_phrase=True,
1120
+ )
1121
+ parsed_dict['polygons'] = instances
1122
+ elif task == 'bboxes':
1123
+ instances = self.parse_description_with_bboxes_from_text_and_spans(
1124
+ text,
1125
+ pattern=pattern,
1126
+ image_size=image_size,
1127
+ allow_empty_phrase=True,
1128
+ )
1129
+ parsed_dict['bboxes'] = instances
1130
+ elif task == 'description_with_bboxes_or_polygons':
1131
+ if '<poly>' in text:
1132
+ # only support either polygons or bboxes, not both at the same time
1133
+ instances = self.parse_description_with_polygons_from_text_and_spans(
1134
+ text,
1135
+ pattern=pattern,
1136
+ image_size=image_size,
1137
+ )
1138
+ else:
1139
+ instances = self.parse_description_with_bboxes_from_text_and_spans(
1140
+ text,
1141
+ pattern=pattern,
1142
+ image_size=image_size,
1143
+ )
1144
+ parsed_dict['description_with_bboxes_or_polygons'] = instances
1145
+ else:
1146
+ raise ValueError("task {} is not supported".format(task))
1147
+
1148
+ return parsed_dict
t5xxl_fp16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635
3
+ size 9787841024
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_max_length": 1024
3
+ }
4
+