File size: 3,082 Bytes
37d5625
 
4afee64
37d5625
 
 
b03a0b6
 
 
 
98307cd
4afee64
98307cd
4afee64
 
98307cd
 
 
 
 
 
 
b03a0b6
37d5625
98307cd
4afee64
 
98307cd
 
 
 
 
4afee64
 
98307cd
4afee64
98307cd
 
4afee64
98307cd
4afee64
 
 
 
 
98307cd
4afee64
98307cd
4afee64
 
 
 
98307cd
4afee64
 
 
 
 
 
 
98307cd
4afee64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98307cd
4afee64
 
 
 
98307cd
4afee64
 
 
98307cd
4afee64
 
 
 
 
98307cd
4afee64
 
b03a0b6
4afee64
b03a0b6
b20bca2
 
4afee64
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101


# Convert the recognition results under pdfs/ into the OCR server format (image/md5.txt, json/md5.json)
# and copy them to huggingface/project/ocr/ocrServer/data so they do not need to be re-recognized.

# see huggingface/project/ocr/tools/flask_auto_selection.py

import numpy as np
import cv2
import json
import base64
import os
import shutil
import hashlib
import glob
import re

def md5(fname):
    """Return the hex MD5 digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as fh:
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == '__main__':

    dir_book = 'pdfs/jp/徐一平日本语句型辞典'

    dir = 'data'
    if os.path.exists(dir):
        shutil.rmtree(dir)
        os.makedirs(os.path.join(dir, 'img'))
        os.makedirs(os.path.join(dir, 'json'))
        os.makedirs(os.path.join(dir, os.path.basename(dir_book)))

    


    
    pths = glob.glob(dir_book + '/*.jpg', recursive=False)

    pths = sorted(pths, key=lambda p:(
        match := re.findall(r'.+?(\d+)\.jpg', p),
        number := int(match[0]),
        number # 如果第一项相等就会比较第二项
    ))

    for idx, pth in enumerate(pths):

        m5 = md5(pth)
        p_j = pth.replace('.jpg', '.json')
        if not os.path.exists(p_j):
            raise Exception(f'##### error: json not found. {p_j}')

        imgData = np.fromfile(pth, dtype=np.uint8)
        img = cv2.imdecode(imgData, cv2.IMREAD_UNCHANGED)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 把img 对象编码为jpg 格式
        success, encoded_image = cv2.imencode(".jpg", img)
        img_bytes = encoded_image.tobytes()
        base64_str = base64.b64encode(img_bytes).decode('ascii')

        img_bytes_restored = base64.b64decode(base64_str)
        imgdata_restored = np.frombuffer(img_bytes_restored, dtype=np.uint8) # .reshape(img.shape)
        img_restored = cv2.imdecode(imgdata_restored, cv2.IMREAD_UNCHANGED)


        pth_img = os.path.join(dir, 'img/{}.txt'.format(m5))
        pth_json = os.path.join(dir, 'json/{}.json'.format(m5))
        pth_book = os.path.join(dir, os.path.basename(dir_book))

        with open(pth_img, 'w', encoding='utf-8') as f:
            f.write(base64_str)

        dest = shutil.copy(p_j, pth_json)

        dest = shutil.copy(pth, pth_book)  

        if not os.path.exists(pth_img):  # 没有相应的图片,可能被删除了
            # raise Exception(f'Warnnig: no image {pth_img}')
            print( f'Warnnig: no image {pth_img}' )
            pass

        if not os.path.exists(pth_json):
            # raise Exception(f'Warnnig: no image {pth_json}')
            pass

        # with open(pth_img, "r", encoding="utf-8") as fp:
        #     imgdata = fp.read()
        #     imgdata = base64.b64decode(imgdata)
        #     imgdata = np.frombuffer(imgdata, np.uint8)
        #     img = cv2.imdecode(imgdata, cv2.IMREAD_UNCHANGED)

            # cv2.imshow('img', img)
            # cv2.waitKey(0)

        print( f'one task done. {idx+1} / {len(pths)}' )

        break

    print( 'all task done.' )