Tags: Image-to-Text · Transformers · Safetensors · Khmer · khmer-ocr · feature-extraction · transformer · text-recognition · crnn · khmer-text-recognition · custom_code
# tokenization_khmerocr.py
import json
import os
from typing import List, Optional, Tuple, Union, Dict
from transformers import PreTrainedTokenizer

class KhmerOCRTokenizer(PreTrainedTokenizer):
    """

    Custom Character-level Tokenizer for Khmer OCR

    """
    vocab_files_names = {"vocab_file": "vocab.json"}
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(

        self,

        vocab_file=None,

        unk_token="<unk>",

        pad_token="<pad>",

        bos_token="<sos>",

        eos_token="<eos>",

        **kwargs

    ):
        # 1. Initialize empty vocab/decoder BEFORE calling super()
        self.vocab = {}
        self.decoder = {}

        # 2. Load vocab immediately if file is provided
        if vocab_file:
            with open(vocab_file, encoding="utf-8") as f:
                self.vocab = json.load(f)
            self.decoder = {v: k for k, v in self.vocab.items()}

        # 3. NOW call super() (Parent class can now safely call get_vocab())
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            **kwargs
        )

        # Resolve the special token ids from the vocab (conventional fallbacks: pad=0, sos=1, eos=2)
        self.pad_token_id = self.vocab.get(pad_token, 0)
        self.bos_token_id = self.vocab.get(bos_token, 1)
        self.eos_token_id = self.vocab.get(eos_token, 2)

    @property
    def vocab_size(self):
        return len(self.vocab)

    def _tokenize(self, text: str) -> List[str]:
        # Character-level tokenization: every character (including spaces) is one token
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        # Characters missing from the vocab fall back to the <unk> id
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
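
    # Presumed override, not in the original file: the base PreTrainedTokenizer joins
    # tokens with spaces when decoding, which would insert a space between every Khmer
    # character; a character-level tokenizer likely wants direct concatenation instead.
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)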

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if filename_prefix:
            vocab_file = os.path.join(save_directory, f"{filename_prefix}-vocab.json")
        else:
            vocab_file = os.path.join(save_directory, "vocab.json")

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self.vocab, f, ensure_ascii=False)

        return (vocab_file,)

    def get_vocab(self) -> Dict[str, int]:
        # Return a copy so callers cannot mutate the tokenizer's internal vocab
        return dict(self.vocab)
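
For reference, a minimal usage sketch. It assumes the tokenizer file sits in the working directory alongside a vocab.json that maps each Khmer character and the special tokens to integer ids; the file name usage_sketch.py and the sample string are placeholders, not part of this repository.

# usage_sketch.py -- minimal example, assuming a local vocab.json maps characters to ids
from tokenization_khmerocr import KhmerOCRTokenizer

tokenizer = KhmerOCRTokenizer(vocab_file="vocab.json")

text = "សួស្តី"  # arbitrary Khmer sample
encoded = tokenizer(text, add_special_tokens=False)
print(encoded["input_ids"])  # one id per character

# Round-trip back to text
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))

# Write vocab.json into the current directory (as implemented by save_vocabulary above)
tokenizer.save_vocabulary(".")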