Update README.md
README.md
CHANGED
The model is translated from [PaddlePaddle/ernie-layoutx-base-uncased](https://huggingface.co/PaddlePaddle/ernie-layoutx-base-uncased).

This commit replaces the previous usage example, which assembled the model inputs by hand (converting `tokenized_res['input_ids']`, `tokenized_res['bbox']`, and the extractor's `pixel_values` to tensors via `numpy` and moving the model to a device with `model.to(device)`), with the processor-based example below.
```python
import torch
from PIL import Image
import torch.nn.functional as F
from networks import ErnieLayoutConfig, ErnieLayoutForQuestionAnswering, \
    ErnieLayoutProcessor, ErnieLayoutTokenizerFast
from transformers.models.layoutlmv3 import LayoutLMv3ImageProcessor

pretrain_torch_model_or_path = "Norm/ERNIE-Layout-Pytorch"
doc_imag_path = "./dummy_input.jpeg"

# dummy input: OCR text segments and their bounding boxes
context = ['This is an example sequence', 'All ocr boxes are inserted into this list']
layout = [[381, 91, 505, 115], [738, 96, 804, 122]]  # make sure all boxes are normalized between 0 - 1000 (see the sketch after this block)
pil_image = Image.open(doc_imag_path).convert("RGB")

# initialize tokenizer
tokenizer = ErnieLayoutTokenizerFast.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path)

# initialize feature extractor; apply_ocr=False because the boxes are supplied manually
feature_extractor = LayoutLMv3ImageProcessor(apply_ocr=False)
processor = ErnieLayoutProcessor(image_processor=feature_extractor, tokenizer=tokenizer)

# tokenize the question & context together with the image and boxes
question = "what is it?"
encoding = processor(pil_image, question, context, boxes=layout, return_tensors="pt")

# dummy answer start and end index
start_positions = torch.tensor([6])
end_positions = torch.tensor([12])

# initialize config and model
config = ErnieLayoutConfig.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path)
model = ErnieLayoutForQuestionAnswering.from_pretrained(
    pretrained_model_name_or_path=pretrain_torch_model_or_path,
    config=config,
)

output = model(**encoding, start_positions=start_positions, end_positions=end_positions)

# decode output: take the most likely start/end token positions
start_max = torch.argmax(F.softmax(output.start_logits, dim=-1))
end_max = torch.argmax(F.softmax(output.end_logits, dim=-1)) + 1  # add one because of python list indexing
answer = tokenizer.decode(encoding.input_ids[0][start_max: end_max])
print(answer)
```
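
The boxes in `layout` above are already on the 0-1000 grid. If your OCR engine returns pixel coordinates instead, rescale them against the image size first; a minimal sketch (the `normalize_box` helper and `pixel_boxes` variable are illustrative names, not part of this repo):

```python
def normalize_box(box, width, height):
    # rescale a pixel-space box [x0, y0, x1, y1] to the 0-1000 grid
    # expected by the layout embeddings
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]

width, height = pil_image.size
layout = [normalize_box(box, width, height) for box in pixel_boxes]
```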
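
The updated example runs on CPU; the `model.to(device)` call from the previous revision was dropped. To run on GPU, move the model and the encoded inputs together; a minimal sketch (assuming the usual `transformers` `BatchEncoding.to` behavior):

```python
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model.to(device)
encoding = encoding.to(device)  # moves input_ids, bbox, and pixel_values in one call
start_positions = start_positions.to(device)
end_positions = end_positions.to(device)

output = model(**encoding, start_positions=start_positions, end_positions=end_positions)
```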