import argparse
import gradio as gr
import torch
from PIL import Image
import re
from transformers import DonutProcessor, VisionEncoderDecoderModel

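# Run one Donut inference: encode the image, generate from the task prompt, and parse the output into JSON.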
def demo_process(input_img, question=None):
    global processor, model

    input_img = Image.fromarray(input_img)
    pixel_values = processor(input_img, return_tensors="pt").pixel_values.to(device)

    if question:
        # DocVQA-style prompt: wrap the user question in Donut's question/answer tags.
        task_prompt = f"<s_docvqa><s_question>{question}</s_question><s_answer>"
    else:
        # Default task: CORD v2 receipt parsing.
        task_prompt = "<s_cord-v2>"
    decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids.to(device)

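    # Greedy decoding (num_beams=1) from the task prompt; <unk> tokens are blocked via bad_words_ids.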
    with torch.no_grad():
        outputs = model.generate(
            pixel_values,
            decoder_input_ids=decoder_input_ids,
            max_length=1024,  
            early_stopping=True,
            pad_token_id=processor.tokenizer.pad_token_id,
            eos_token_id=processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        )

    # Decode, strip the EOS/pad tokens and the leading task token, then convert to JSON.
    seq = processor.batch_decode(outputs.sequences)[0]
    seq = seq.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "")
    seq = re.sub(r"<.*?>", "", seq, count=1).strip()  # remove the first task start token
    seq = processor.token2json(seq)
    return seq

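# CLI arguments: task name and the pretrained checkpoint to load.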
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="cord-v2")
parser.add_argument("--pretrained_path", type=str, default="suthawadee/donut-demo_new")
args, left_argv = parser.parse_known_args()

processor = DonutProcessor.from_pretrained(args.pretrained_path)
model = VisionEncoderDecoderModel.from_pretrained(args.pretrained_path)
device = "cpu" if not torch.cuda.is_available() else "cuda"
model.to(device)
model.eval()

# Bundled example images for quick testing.
image1 = "8.jpg"
image2 = "15.jpg"

# Gradio example inputs for the Image component are given as file paths.
examples = [
    [image1],
    [image2],
]

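# Build and launch the Gradio demo.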
def main(task, examples):
    demo = gr.Interface(
        fn=demo_process,
        # DocVQA needs an image plus a question; other tasks take only an image.
        inputs=["image", "text"] if task == "docvqa" else "image",
        outputs="json",
        title="🇹🇭🧾ThaiReceipt",
        description="Upload a receipt image to extract its contents as JSON.",
        examples=examples,
    )
    demo.launch(debug=True)

main(args.task, examples)