init
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- .gitignore +143 -0
- .readthedocs.yaml +19 -0
- LICENSE +21 -0
- Makefile +44 -0
- README.md +284 -0
- README_cn.md +288 -0
- docs/RELEASE.md +372 -0
- docs/buymeacoffee.md +35 -0
- docs/command.md +150 -0
- docs/contact.md +37 -0
- docs/demo.md +20 -0
- docs/examples.md +221 -0
- docs/examples/test-doc.pdf +3 -0
- docs/examples_en.md +219 -0
- docs/faq.md +8 -0
- docs/figs/breezedeus.ico +0 -0
- docs/index.md +263 -0
- docs/index_en.md +263 -0
- docs/install.md +49 -0
- docs/models.md +99 -0
- docs/pix2text/latex_ocr.md +1 -0
- docs/pix2text/pix_to_text.md +1 -0
- docs/pix2text/table_ocr.md +1 -0
- docs/pix2text/text_formula_ocr.md +1 -0
- docs/requirements.txt +387 -0
- docs/train.md +3 -0
- docs/usage.md +547 -0
- mkdocs.yml +119 -0
- pix2text/__init__.py +14 -0
- pix2text/__version__.py +5 -0
- pix2text/app.py +61 -0
- pix2text/cli.py +751 -0
- pix2text/consts.py +196 -0
- pix2text/doc_xl_layout/__init__.py +4 -0
- pix2text/doc_xl_layout/detectors/__init__.py +0 -0
- pix2text/doc_xl_layout/detectors/base_detector_subfield.py +206 -0
- pix2text/doc_xl_layout/detectors/ctdet_subfield.py +225 -0
- pix2text/doc_xl_layout/detectors/detector_factory.py +7 -0
- pix2text/doc_xl_layout/doc_xl_layout_parser.py +478 -0
- pix2text/doc_xl_layout/external/__init__.py +0 -0
- pix2text/doc_xl_layout/external/shapelyNMS.py +75 -0
- pix2text/doc_xl_layout/huntie_subfield.py +13 -0
- pix2text/doc_xl_layout/opts.py +410 -0
- pix2text/doc_xl_layout/utils/__init__.py +0 -0
- pix2text/doc_xl_layout/utils/ddd_utils.py +139 -0
- pix2text/doc_xl_layout/utils/debugger.py +606 -0
- pix2text/doc_xl_layout/utils/evaluation_bk.py +437 -0
- pix2text/doc_xl_layout/utils/image.py +242 -0
- pix2text/doc_xl_layout/utils/post_process.py +143 -0
.gitattributes
CHANGED
|
@@ -1,4 +1,6 @@
|
|
| 1 |
*.7z filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
| 2 |
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.ipynb filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.pdf filter=lfs diff=lfs merge=lfs -text
|
| 4 |
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 5 |
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 6 |
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
.DS_Store
|
| 12 |
+
data/
|
| 13 |
+
models/
|
| 14 |
+
output-*/
|
| 15 |
+
outputs-*/
|
| 16 |
+
outputs/
|
| 17 |
+
*.jpg
|
| 18 |
+
*.jpeg
|
| 19 |
+
*.png
|
| 20 |
+
docs/feedbacks/
|
| 21 |
+
*.tar
|
| 22 |
+
*.pth
|
| 23 |
+
build/
|
| 24 |
+
develop-eggs/
|
| 25 |
+
dist/
|
| 26 |
+
downloads/
|
| 27 |
+
eggs/
|
| 28 |
+
.eggs/
|
| 29 |
+
.idea/
|
| 30 |
+
lib/
|
| 31 |
+
lib64/
|
| 32 |
+
parts/
|
| 33 |
+
sdist/
|
| 34 |
+
var/
|
| 35 |
+
wheels/
|
| 36 |
+
pip-wheel-metadata/
|
| 37 |
+
share/python-wheels/
|
| 38 |
+
*.egg-info/
|
| 39 |
+
.installed.cfg
|
| 40 |
+
*.egg
|
| 41 |
+
MANIFEST
|
| 42 |
+
|
| 43 |
+
# PyInstaller
|
| 44 |
+
# Usually these files are written by a python script from a template
|
| 45 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 46 |
+
*.manifest
|
| 47 |
+
*.spec
|
| 48 |
+
|
| 49 |
+
# Installer logs
|
| 50 |
+
pip-log.txt
|
| 51 |
+
pip-delete-this-directory.txt
|
| 52 |
+
|
| 53 |
+
# Unit test / coverage reports
|
| 54 |
+
htmlcov/
|
| 55 |
+
.tox/
|
| 56 |
+
.nox/
|
| 57 |
+
.coverage
|
| 58 |
+
.coverage.*
|
| 59 |
+
.cache
|
| 60 |
+
nosetests.xml
|
| 61 |
+
coverage.xml
|
| 62 |
+
*.cover
|
| 63 |
+
*.py,cover
|
| 64 |
+
.hypothesis/
|
| 65 |
+
.pytest_cache/
|
| 66 |
+
|
| 67 |
+
# Translations
|
| 68 |
+
*.mo
|
| 69 |
+
*.pot
|
| 70 |
+
|
| 71 |
+
# Django stuff:
|
| 72 |
+
*.log
|
| 73 |
+
local_settings.py
|
| 74 |
+
db.sqlite3
|
| 75 |
+
db.sqlite3-journal
|
| 76 |
+
|
| 77 |
+
# Flask stuff:
|
| 78 |
+
instance/
|
| 79 |
+
.webassets-cache
|
| 80 |
+
|
| 81 |
+
# Scrapy stuff:
|
| 82 |
+
.scrapy
|
| 83 |
+
|
| 84 |
+
# Sphinx documentation
|
| 85 |
+
docs/_build/
|
| 86 |
+
|
| 87 |
+
# PyBuilder
|
| 88 |
+
target/
|
| 89 |
+
|
| 90 |
+
# Jupyter Notebook
|
| 91 |
+
.ipynb_checkpoints
|
| 92 |
+
|
| 93 |
+
# IPython
|
| 94 |
+
profile_default/
|
| 95 |
+
ipython_config.py
|
| 96 |
+
|
| 97 |
+
# pyenv
|
| 98 |
+
.python-version
|
| 99 |
+
|
| 100 |
+
# pipenv
|
| 101 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 102 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 103 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 104 |
+
# install all needed dependencies.
|
| 105 |
+
#Pipfile.lock
|
| 106 |
+
|
| 107 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 108 |
+
__pypackages__/
|
| 109 |
+
|
| 110 |
+
# Celery stuff
|
| 111 |
+
celerybeat-schedule
|
| 112 |
+
celerybeat.pid
|
| 113 |
+
|
| 114 |
+
# SageMath parsed files
|
| 115 |
+
*.sage.py
|
| 116 |
+
|
| 117 |
+
# Environments
|
| 118 |
+
.env
|
| 119 |
+
.venv
|
| 120 |
+
env/
|
| 121 |
+
venv/
|
| 122 |
+
ENV/
|
| 123 |
+
env.bak/
|
| 124 |
+
venv.bak/
|
| 125 |
+
|
| 126 |
+
# Spyder project settings
|
| 127 |
+
.spyderproject
|
| 128 |
+
.spyproject
|
| 129 |
+
|
| 130 |
+
# Rope project settings
|
| 131 |
+
.ropeproject
|
| 132 |
+
|
| 133 |
+
# mkdocs documentation
|
| 134 |
+
/site
|
| 135 |
+
|
| 136 |
+
# mypy
|
| 137 |
+
.mypy_cache/
|
| 138 |
+
.dmypy.json
|
| 139 |
+
dmypy.json
|
| 140 |
+
|
| 141 |
+
# Pyre type checker
|
| 142 |
+
.pyre/
|
| 143 |
+
|
.readthedocs.yaml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Read the Docs configuration file for MkDocs projects
|
| 2 |
+
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
| 3 |
+
|
| 4 |
+
# Required
|
| 5 |
+
version: 2
|
| 6 |
+
|
| 7 |
+
# Set the version of Python and other tools you might need
|
| 8 |
+
build:
|
| 9 |
+
os: ubuntu-22.04
|
| 10 |
+
tools:
|
| 11 |
+
python: "3.9"
|
| 12 |
+
|
| 13 |
+
mkdocs:
|
| 14 |
+
configuration: mkdocs.yml
|
| 15 |
+
|
| 16 |
+
# Optionally declare the Python requirements required to build your docs
|
| 17 |
+
python:
|
| 18 |
+
install:
|
| 19 |
+
- requirements: docs/requirements.txt
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 BreezeDeus
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
Makefile
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
predict:
|
| 2 |
+
p2t predict -l en,ch_sim -a mfd -t yolov7_tiny -i docs/examples/mixed.jpg --save-analysis-res tmp-output.jpg
|
| 3 |
+
# p2t predict -l en,ch_sim --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' \
|
| 4 |
+
# --use-analyzer -a mfd -t yolov7 --resized-shape 768 \
|
| 5 |
+
# --analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt \
|
| 6 |
+
# --latex-ocr-model-fp ~/.pix2text/formula/p2t-mfr-20230702.pth \
|
| 7 |
+
# -i docs/examples/mixed.jpg --save-analysis-res tmp-output.jpg
|
| 8 |
+
# p2t predict -l vi \
|
| 9 |
+
# --use-analyzer -a mfd -t yolov7 --resized-shape 768 \
|
| 10 |
+
# --analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt \
|
| 11 |
+
# --latex-ocr-model-fp ~/.pix2text/formula/p2t-mfr-20230702.pth \
|
| 12 |
+
# -i docs/examples/vietnamese.jpg --save-analysis-res tmp-output.jpg
|
| 13 |
+
# p2t predict -l en,ch_tra \
|
| 14 |
+
# --use-analyzer -a mfd -t yolov7 --resized-shape 768 \
|
| 15 |
+
# --analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt \
|
| 16 |
+
# --latex-ocr-model-fp ~/.pix2text/formula/p2t-mfr-20230702.pth --rec-kwargs '{"det_bbox_max_expand_ratio": 0}'\
|
| 17 |
+
# -i docs/examples/ch_tra7.jpg --save-analysis-res tmp-output.jpg
|
| 18 |
+
|
| 19 |
+
evaluate-mfr:
|
| 20 |
+
p2t evaluate -l en,ch_sim --mfd-config '{"model_name": "mfd"}' \
|
| 21 |
+
--formula-ocr-config '{"model_name":"mfr-1.5","model_backend":"onnx"}' \
|
| 22 |
+
--text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' \
|
| 23 |
+
--resized-shape 768 --auto-line-break --file-type formula \
|
| 24 |
+
--max-samples 50 --prefix-img-dir data \
|
| 25 |
+
-i data/exported_call_events_with_images.json -o data/exported_cer_mfr1.0.json \
|
| 26 |
+
--output-excel data/exported_cer_mfr1.0.xls --output-html data/exported_cer_mfr1.0.html
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
package:
|
| 30 |
+
rm -rf build
|
| 31 |
+
python setup.py sdist bdist_wheel
|
| 32 |
+
|
| 33 |
+
VERSION := $(shell sed -n "s/^__version__ = '\(.*\)'/\1/p" pix2text/__version__.py)
|
| 34 |
+
upload:
|
| 35 |
+
python -m twine upload dist/pix2text-$(VERSION)* --verbose
|
| 36 |
+
|
| 37 |
+
# 开启 OCR HTTP 服务
|
| 38 |
+
serve:
|
| 39 |
+
p2t serve -l en,ch_sim -a mfd -t yolov7 --analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}'
|
| 40 |
+
|
| 41 |
+
docker-build:
|
| 42 |
+
docker build -t breezedeus/pix2text:v$(VERSION) .
|
| 43 |
+
|
| 44 |
+
.PHONY: package upload serve daemon
|
README.md
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<img src="./docs/figs/p2t-logo.png" width="220px"/>
|
| 3 |
+
<div> </div>
|
| 4 |
+
|
| 5 |
+
[](https://discord.gg/GgD87WM8Tf)
|
| 6 |
+
[](https://pepy.tech/project/pix2text)
|
| 7 |
+
[](https://visitorbadge.io/status?path=https%3A%2F%2Fgithub.com%2Fbreezedeus%2FPix2Text)
|
| 8 |
+
[](./LICENSE)
|
| 9 |
+
[](https://badge.fury.io/py/pix2text)
|
| 10 |
+
[](https://github.com/breezedeus/pix2text)
|
| 11 |
+
[](https://github.com/breezedeus/pix2text)
|
| 12 |
+

|
| 13 |
+

|
| 14 |
+
[](https://twitter.com/breezedeus)
|
| 15 |
+
|
| 16 |
+
[📖 Doc](https://pix2text.readthedocs.io) |
|
| 17 |
+
[👩🏻💻 Online Service](https://p2t.breezedeus.com) |
|
| 18 |
+
[👨🏻💻 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo) |
|
| 19 |
+
[💬 Contact](https://www.breezedeus.com/article/join-group)
|
| 20 |
+
|
| 21 |
+
</div>
|
| 22 |
+
|
| 23 |
+
<div align="center">
|
| 24 |
+
|
| 25 |
+
[中文](./README_cn.md) | English
|
| 26 |
+
|
| 27 |
+
</div>
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# Pix2Text
|
| 32 |
+
|
| 33 |
+
## Update 2025.07.25: **V1.1.4** Released
|
| 34 |
+
|
| 35 |
+
Major Changes:
|
| 36 |
+
|
| 37 |
+
- Upgraded the Mathematical Formula Detection (MFD) and Mathematical Formula Recognition (MFR) models to version 1.5. All default configurations, documentation, and examples now use `mfd-1.5` and `mfr-1.5` as the standard models.
|
| 38 |
+
|
| 39 |
+
## Update 2025.04.15: **V1.1.3** Released
|
| 40 |
+
|
| 41 |
+
Major Changes:
|
| 42 |
+
|
| 43 |
+
- Support for `VlmTableOCR` and `VlmTextFormulaOCR` models based on the VLM interface (see [LiteLLM documentation](https://docs.litellm.ai/docs/)) allowing the use of closed-source VLM models. Installation command: `pip install pix2text[vlm]`.
|
| 44 |
+
- Usage examples can be found in [tests/test_vlm.py](tests/test_vlm.py) and [tests/test_pix2text.py](tests/test_pix2text.py).
|
| 45 |
+
|
| 46 |
+
## Update 2024.11.17: **V1.1.2** Released
|
| 47 |
+
|
| 48 |
+
Major Changes:
|
| 49 |
+
|
| 50 |
+
* A new layout analysis model [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO) has been integrated, improving the accuracy of layout analysis.
|
| 51 |
+
|
| 52 |
+
## Update 2024.06.18:**V1.1.1** Released
|
| 53 |
+
|
| 54 |
+
Major changes:
|
| 55 |
+
|
| 56 |
+
* Support the new mathematical formula detection models (MFD): [breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd)), which significantly improves the accuracy of formula detection.
|
| 57 |
+
|
| 58 |
+
See details: [Pix2Text V1.1.1 Released, Bringing Better Mathematical Formula Detection Models | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1).
|
| 59 |
+
|
| 60 |
+
## Update 2024.04.28: **V1.1** Released
|
| 61 |
+
|
| 62 |
+
Major changes:
|
| 63 |
+
|
| 64 |
+
* Added layout analysis and table recognition models, supporting the conversion of images with complex layouts into Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples_en/).
|
| 65 |
+
* Added support for converting entire PDF files to Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples_en/).
|
| 66 |
+
* Enhanced the interface with more features, including adjustments to existing interface parameters.
|
| 67 |
+
* Launched the [Pix2Text Online Documentation](https://pix2text.readthedocs.io).
|
| 68 |
+
|
| 69 |
+
## Update 2024.02.26: **V1.0** Released
|
| 70 |
+
|
| 71 |
+
Main Changes:
|
| 72 |
+
|
| 73 |
+
* The Mathematical Formula Recognition (MFR) model employs a new architecture and has been trained on a new dataset, achieving state-of-the-art (SOTA) accuracy. For detailed information, please see: [Pix2Text V1.0 New Release: The Best Open-Source Formula Recognition Model | Breezedeus.com](https://www.breezedeus.com/article/p2t-v1.0).
|
| 74 |
+
|
| 75 |
+
See more at: [RELEASE.md](docs/RELEASE.md) .
|
| 76 |
+
|
| 77 |
+
<br/>
|
| 78 |
+
|
| 79 |
+
**Pix2Text (P2T)** aims to be a **free and open-source Python** alternative to **[Mathpix](https://mathpix.com/)**, and it can already accomplish **Mathpix**'s core functionality. **Pix2Text (P2T) can recognize layouts, tables, images, text, mathematical formulas, and integrate all of these contents into Markdown format. P2T can also convert an entire PDF file (which can contain scanned images or any other format) into Markdown format.**
|
| 80 |
+
|
| 81 |
+
**Pix2Text (P2T)** integrates the following models:
|
| 82 |
+
|
| 83 |
+
- **Layout Analysis Model**: [breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-layout)).
|
| 84 |
+
- **Table Recognition Model**: [breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-table-rec)).
|
| 85 |
+
- **Text Recognition Engine**: Supports **80+ languages** such as **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. For English and Simplified Chinese recognition, it uses the open-source OCR tool [CnOCR](https://github.com/breezedeus/cnocr), while for other languages, it uses the open-source OCR tool [EasyOCR](https://github.com/JaidedAI/EasyOCR).
|
| 86 |
+
- **Mathematical Formula Detection Model (MFD)**: [breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5)). Implemented based on [CnSTD](https://github.com/breezedeus/cnstd).
|
| 87 |
+
- **Mathematical Formula Recognition Model (MFR)**: [breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5)).
|
| 88 |
+
|
| 89 |
+
Several models are contributed by other open-source authors, and their contributions are highly appreciated.
|
| 90 |
+
|
| 91 |
+
<div align="center">
|
| 92 |
+
<img src="docs/figs/arch-flow.jpg" alt="Pix2Text Arch Flow"/>
|
| 93 |
+
</div>
|
| 94 |
+
|
| 95 |
+
For detailed explanations, please refer to the [Pix2Text Online Documentation/Models](https://pix2text.readthedocs.io/zh-cn/stable/models/).
|
| 96 |
+
|
| 97 |
+
<br/>
|
| 98 |
+
|
| 99 |
+
As a Python3 toolkit, P2T may not be very user-friendly for those who are not familiar with Python. Therefore, we also provide a **[free-to-use P2T Online Web](https://p2t.breezedeus.com)**, where you can directly upload images and get P2T parsing results. The web version uses the latest models, resulting in better performance compared to the open-source models.
|
| 100 |
+
|
| 101 |
+
If you're interested, feel free to add the assistant as a friend by scanning the QR code and mentioning `p2t`. The assistant will regularly invite everyone to join the group where the latest developments related to P2T tools will be announced:
|
| 102 |
+
|
| 103 |
+
<div align="center">
|
| 104 |
+
<img src="https://pix2text.readthedocs.io/zh-cn/stable/figs/wx-qr-code.JPG" alt="Wechat-QRCode" width="300px"/>
|
| 105 |
+
</div>
|
| 106 |
+
|
| 107 |
+
The author also maintains a **Knowledge Planet** [**P2T/CnOCR/CnSTD Private Group**](https://t.zsxq.com/FEYZRJQ), where questions are answered promptly. You're welcome to join. The **knowledge planet private group** will also gradually release some private materials related to P2T/CnOCR/CnSTD, including **some unreleased models**, **discounts on purchasing premium models**, **code snippets for different application scenarios**, and answers to difficult problems encountered during use. The planet will also publish the latest research materials related to P2T/OCR/STD.
|
| 108 |
+
|
| 109 |
+
For more contact method, please refer to [Contact](https://pix2text.readthedocs.io/zh-cn/stable/contact/).
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
## List of Supported Languages
|
| 113 |
+
|
| 114 |
+
The text recognition engine of Pix2Text supports **`80+` languages**, including **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. Among these, **English** and **Simplified Chinese** recognition utilize the open-source OCR tool **[CnOCR](https://github.com/breezedeus/cnocr)**, while recognition for other languages employs the open-source OCR tool **[EasyOCR](https://github.com/JaidedAI/EasyOCR)**. Special thanks to the respective authors.
|
| 115 |
+
|
| 116 |
+
List of **Supported Languages** and **Language Codes** are shown below:
|
| 117 |
+
|
| 118 |
+
<details>
|
| 119 |
+
<summary>↓↓↓ Click to show details ↓↓↓</summary>
|
| 120 |
+
|
| 121 |
+
| Language | Code Name |
|
| 122 |
+
| ------------------- | ----------- |
|
| 123 |
+
| Abaza | abq |
|
| 124 |
+
| Adyghe | ady |
|
| 125 |
+
| Afrikaans | af |
|
| 126 |
+
| Angika | ang |
|
| 127 |
+
| Arabic | ar |
|
| 128 |
+
| Assamese | as |
|
| 129 |
+
| Avar | ava |
|
| 130 |
+
| Azerbaijani | az |
|
| 131 |
+
| Belarusian | be |
|
| 132 |
+
| Bulgarian | bg |
|
| 133 |
+
| Bihari | bh |
|
| 134 |
+
| Bhojpuri | bho |
|
| 135 |
+
| Bengali | bn |
|
| 136 |
+
| Bosnian | bs |
|
| 137 |
+
| Simplified Chinese | ch_sim |
|
| 138 |
+
| Traditional Chinese | ch_tra |
|
| 139 |
+
| Chechen | che |
|
| 140 |
+
| Czech | cs |
|
| 141 |
+
| Welsh | cy |
|
| 142 |
+
| Danish | da |
|
| 143 |
+
| Dargwa | dar |
|
| 144 |
+
| German | de |
|
| 145 |
+
| English | en |
|
| 146 |
+
| Spanish | es |
|
| 147 |
+
| Estonian | et |
|
| 148 |
+
| Persian (Farsi) | fa |
|
| 149 |
+
| French | fr |
|
| 150 |
+
| Irish | ga |
|
| 151 |
+
| Goan Konkani | gom |
|
| 152 |
+
| Hindi | hi |
|
| 153 |
+
| Croatian | hr |
|
| 154 |
+
| Hungarian | hu |
|
| 155 |
+
| Indonesian | id |
|
| 156 |
+
| Ingush | inh |
|
| 157 |
+
| Icelandic | is |
|
| 158 |
+
| Italian | it |
|
| 159 |
+
| Japanese | ja |
|
| 160 |
+
| Kabardian | kbd |
|
| 161 |
+
| Kannada | kn |
|
| 162 |
+
| Korean | ko |
|
| 163 |
+
| Kurdish | ku |
|
| 164 |
+
| Latin | la |
|
| 165 |
+
| Lak | lbe |
|
| 166 |
+
| Lezghian | lez |
|
| 167 |
+
| Lithuanian | lt |
|
| 168 |
+
| Latvian | lv |
|
| 169 |
+
| Magahi | mah |
|
| 170 |
+
| Maithili | mai |
|
| 171 |
+
| Maori | mi |
|
| 172 |
+
| Mongolian | mn |
|
| 173 |
+
| Marathi | mr |
|
| 174 |
+
| Malay | ms |
|
| 175 |
+
| Maltese | mt |
|
| 176 |
+
| Nepali | ne |
|
| 177 |
+
| Newari | new |
|
| 178 |
+
| Dutch | nl |
|
| 179 |
+
| Norwegian | no |
|
| 180 |
+
| Occitan | oc |
|
| 181 |
+
| Pali | pi |
|
| 182 |
+
| Polish | pl |
|
| 183 |
+
| Portuguese | pt |
|
| 184 |
+
| Romanian | ro |
|
| 185 |
+
| Russian | ru |
|
| 186 |
+
| Serbian (cyrillic) | rs_cyrillic |
|
| 187 |
+
| Serbian (latin) | rs_latin |
|
| 188 |
+
| Nagpuri | sck |
|
| 189 |
+
| Slovak | sk |
|
| 190 |
+
| Slovenian | sl |
|
| 191 |
+
| Albanian | sq |
|
| 192 |
+
| Swedish | sv |
|
| 193 |
+
| Swahili | sw |
|
| 194 |
+
| Tamil | ta |
|
| 195 |
+
| Tabassaran | tab |
|
| 196 |
+
| Telugu | te |
|
| 197 |
+
| Thai | th |
|
| 198 |
+
| Tajik | tjk |
|
| 199 |
+
| Tagalog | tl |
|
| 200 |
+
| Turkish | tr |
|
| 201 |
+
| Uyghur | ug |
|
| 202 |
+
| Ukranian | uk |
|
| 203 |
+
| Urdu | ur |
|
| 204 |
+
| Uzbek | uz |
|
| 205 |
+
| Vietnamese | vi |
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) .
|
| 209 |
+
|
| 210 |
+
</details>
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
## Online Service
|
| 215 |
+
|
| 216 |
+
Everyone can use the **[P2T Online Service](https://p2t.breezedeus.com)** for free, with a daily limit of 10,000 characters per account, which should be sufficient for normal use. *Please refrain from bulk API calls, as machine resources are limited, and this could prevent others from accessing the service.*
|
| 217 |
+
|
| 218 |
+
Due to hardware constraints, the Online Service currently only supports **Simplified Chinese** and **English** languages. To try the models in other languages, please use the following **Online Demo**.
|
| 219 |
+
|
| 220 |
+
## Online Demo 🤗
|
| 221 |
+
|
| 222 |
+
You can also try the **[Online Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)** to see the performance of **P2T** in various languages. However, the online demo operates on lower hardware specifications and may be slower. For Simplified Chinese or English images, it is recommended to use the **[P2T Online Service](https://p2t.breezedeus.com)**.
|
| 223 |
+
|
| 224 |
+
## Examples
|
| 225 |
+
|
| 226 |
+
See: [Pix2Text Online Documentation/Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples_en/).
|
| 227 |
+
|
| 228 |
+
## Usage
|
| 229 |
+
|
| 230 |
+
See: [Pix2Text Online Documentation/Usage](https://pix2text.readthedocs.io/zh-cn/stable/usage/).
|
| 231 |
+
|
| 232 |
+
## Models
|
| 233 |
+
|
| 234 |
+
See: [Pix2Text Online Documentation/Models](https://pix2text.readthedocs.io/zh-cn/stable/models/).
|
| 235 |
+
|
| 236 |
+
## Install
|
| 237 |
+
|
| 238 |
+
Well, one line of command is enough if it goes well.
|
| 239 |
+
|
| 240 |
+
```bash
|
| 241 |
+
pip install pix2text
|
| 242 |
+
```
|
| 243 |
+
|
| 244 |
+
If you need to recognize languages other than **English** and **Simplified Chinese**, please use the following command to install additional packages:
|
| 245 |
+
|
| 246 |
+
```bash
|
| 247 |
+
pip install pix2text[multilingual]
|
| 248 |
+
```
|
| 249 |
+
|
| 250 |
+
If the installation is slow, you can specify an installation source, such as using the Aliyun source:
|
| 251 |
+
|
| 252 |
+
```bash
|
| 253 |
+
pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
|
| 254 |
+
```
|
| 255 |
+
|
| 256 |
+
For more information, please refer to: [Pix2Text Online Documentation/Install](https://pix2text.readthedocs.io/zh-cn/stable/install/).
|
| 257 |
+
|
| 258 |
+
## Command Line Tool
|
| 259 |
+
|
| 260 |
+
See: [Pix2Text Online Documentation/Command Tool](https://pix2text.readthedocs.io/zh-cn/stable/command/).
|
| 261 |
+
|
| 262 |
+
## HTTP Service
|
| 263 |
+
|
| 264 |
+
See: [Pix2Text Online Documentation/Command Tool/Start Service](https://pix2text.readthedocs.io/zh-cn/stable/command/).
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
## MacOS Desktop Application
|
| 268 |
+
|
| 269 |
+
Please refer to [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) for installing the Pix2Text Desktop App for MacOS.
|
| 270 |
+
|
| 271 |
+
<div align="center">
|
| 272 |
+
<img src="https://github.com/breezedeus/Pix2Text-Mac/raw/main/assets/on_menu_bar.jpg" alt="Pix2Text Mac App" width="400px"/>
|
| 273 |
+
</div>
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
## A cup of coffee for the author
|
| 277 |
+
|
| 278 |
+
It is not easy to maintain and evolve the project, so if it is helpful to you, please consider [offering the author a cup of coffee 🥤](https://www.breezedeus.com/article/buy-me-coffee).
|
| 279 |
+
|
| 280 |
+
---
|
| 281 |
+
|
| 282 |
+
Official code base: [https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text). Please cite it properly.
|
| 283 |
+
|
| 284 |
+
For more information on Pix2Text (P2T), visit: [https://www.breezedeus.com/article/pix2text](https://www.breezedeus.com/article/pix2text).
|
README_cn.md
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<img src="./docs/figs/p2t-logo.png" width="220px"/>
|
| 3 |
+
<div> </div>
|
| 4 |
+
|
| 5 |
+
[](https://discord.gg/GgD87WM8Tf)
|
| 6 |
+
[](https://pepy.tech/project/pix2text)
|
| 7 |
+
[](https://visitorbadge.io/status?path=https%3A%2F%2Fgithub.com%2Fbreezedeus%2FPix2Text)
|
| 8 |
+
[](./LICENSE)
|
| 9 |
+
[](https://badge.fury.io/py/pix2text)
|
| 10 |
+
[](https://github.com/breezedeus/pix2text)
|
| 11 |
+
[](https://github.com/breezedeus/pix2text)
|
| 12 |
+

|
| 13 |
+

|
| 14 |
+
[](https://twitter.com/breezedeus)
|
| 15 |
+
|
| 16 |
+
[📖 在线文档](https://pix2text.readthedocs.io) |
|
| 17 |
+
[👩🏻💻 网页版](https://p2t.breezedeus.com) |
|
| 18 |
+
[👨🏻💻 在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo) |
|
| 19 |
+
[💬 交流群](https://www.breezedeus.com/article/join-group)
|
| 20 |
+
|
| 21 |
+
</div>
|
| 22 |
+
|
| 23 |
+
<div align="center">
|
| 24 |
+
|
| 25 |
+
[English](./README.md) | 中文
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
</div>
|
| 29 |
+
|
| 30 |
+
# Pix2Text (P2T)
|
| 31 |
+
|
| 32 |
+
## Update 2025.07.25:发布 **V1.1.4**
|
| 33 |
+
|
| 34 |
+
主要变更:
|
| 35 |
+
|
| 36 |
+
- 数学公式检测(MFD)和数学公式识别(MFR)模型升级到 1.5 版本,所有默认配置、文档和示例均以 `mfd-1.5` 和 `mfr-1.5` 为标准模型。
|
| 37 |
+
|
| 38 |
+
## Update 2025.04.15:分布 **V1.1.3**
|
| 39 |
+
|
| 40 |
+
主要变更:
|
| 41 |
+
|
| 42 |
+
- 支持基于 VLM 接口(具体参考 [LiteLLM 文档](https://docs.litellm.ai/docs/))的 `VlmTableOCR` 和 `VlmTextFormulaOCR` 模型,可使用闭源 VLM 模型。安装命令:`pip install pix2text[vlm]`。
|
| 43 |
+
- 使用方式见 [tests/test_vlm.py](tests/test_vlm.py) 和 [tests/test_pix2text.py](tests/test_pix2text.py)。
|
| 44 |
+
|
| 45 |
+
## Update 2024.11.17:发布 **V1.1.2**
|
| 46 |
+
|
| 47 |
+
主要变更:
|
| 48 |
+
|
| 49 |
+
* 版面分析模型加入 [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO),提升版面分析的准确性。
|
| 50 |
+
|
| 51 |
+
## Update 2024.06.18:发布 **V1.1.1**
|
| 52 |
+
|
| 53 |
+
主要变更:
|
| 54 |
+
|
| 55 |
+
* 支持新的数学公式检测模型(MFD):[breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd)),公式检测精度获得较大提升。
|
| 56 |
+
|
| 57 |
+
具体说明请见:[Pix2Text V1.1.1 发布,带来更好的数学公式检测模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1)。
|
| 58 |
+
|
| 59 |
+
## Update 2024.04.28:发布 **V1.1**
|
| 60 |
+
|
| 61 |
+
主要变更:
|
| 62 |
+
|
| 63 |
+
* 加入了版面分析和表格识别模型,支持把复杂排版的图片转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples/)。
|
| 64 |
+
* 支持把整个 PDF 文件转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples/)。
|
| 65 |
+
* 加入了更丰富的接口,已有接口的参数也有所调整。
|
| 66 |
+
* 上线了 [Pix2Text 在线文档](https://pix2text.readthedocs.io)。
|
| 67 |
+
|
| 68 |
+
## Update 2024.02.26:发布 **V1.0**
|
| 69 |
+
|
| 70 |
+
主要变更:
|
| 71 |
+
|
| 72 |
+
* 数学公式识别(MFR)模型使用新架构,在新的数据集上训练,获得了 SOTA 的精度。具体说明请见:[Pix2Text V1.0 新版发布:最好的开源公式识别模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-v1.0)。
|
| 73 |
+
|
| 74 |
+
了解更多:[RELEASE.md](docs/RELEASE.md) 。
|
| 75 |
+
|
| 76 |
+
<br/>
|
| 77 |
+
|
| 78 |
+
**Pix2Text (P2T)** 期望成为 **[Mathpix](https://mathpix.com/)** 的**免费开源 Python** 替代工具,目前已经可以完成 **Mathpix** 的核心功能。
|
| 79 |
+
**Pix2Text (P2T) 可以识别图片中的版面、表格、图片、文字、数学公式等内容,并整合所有内容后以 Markdown 格式输出。P2T 也可以把一整个 PDF 文件(PDF 的内容可以是扫描图片或者其他任何格式)转换为 Markdown 格式。**
|
| 80 |
+
|
| 81 |
+
**Pix2Text (P2T)** 整合了以下模型:
|
| 82 |
+
|
| 83 |
+
- **版面分析模型**:[breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout))。
|
| 84 |
+
- **表格识别模型**:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
|
| 85 |
+
- **文字识别引擎**:支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
|
| 86 |
+
- **数学公式检测模型(MFD)**:[breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5))。基于 [CnSTD](https://github.com/breezedeus/cnstd) 实现。
|
| 87 |
+
- **数学公式识别模型(MFR)**:[breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5))。
|
| 88 |
+
|
| 89 |
+
其中多个模型来自其他开源作者, 非常感谢他们的贡献。
|
| 90 |
+
|
| 91 |
+
<div align="center">
|
| 92 |
+
<img src="docs/figs/arch-flow.jpg" alt="Pix2Text Arch Flow"/>
|
| 93 |
+
</div>
|
| 94 |
+
|
| 95 |
+
具体说明请参考:[Pix2Text在线文档/模型](https://pix2text.readthedocs.io/zh-cn/stable/models/)。
|
| 96 |
+
|
| 97 |
+
<br/>
|
| 98 |
+
|
| 99 |
+
P2T 作为Python3工具包,对于不熟悉Python的朋友不太友好,所以我们也发布了**可免费使用**的 **[P2T网页版](https://p2t.breezedeus.com)**,直接把图片丢进网页就能输出P2T的解析结果。**网页版会使用最新的模型,效果会比开源模型更好。**
|
| 100 |
+
|
| 101 |
+
感兴趣的朋友欢迎扫码加小助手为好友,备注 `p2t`,小助手会定期统一邀请大家入群。群内会发布P2T相关工具的最新进展:
|
| 102 |
+
|
| 103 |
+
<div align="center">
|
| 104 |
+
<img src="./docs/figs/wx-qr-code.JPG" alt="微信群二维码" width="300px"/>
|
| 105 |
+
</div>
|
| 106 |
+
|
| 107 |
+
作者也维护 **知识星球** [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) ,这里面的提问会较快得到作者的回复,欢迎加入。**知识星球私享群**也会陆续发布一些P2T/CnOCR/CnSTD相关的私有资料,包括**部分未公开的模型**,**购买付费模型享优惠**,**不同应用场景的调用代码**,使用过程中遇到的难题解答等。星球也会发布P2T/OCR/STD相关的最新研究资料。
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
## 支持的语言列表
|
| 112 |
+
|
| 113 |
+
Pix2Text 的文字识别引擎支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 **[CnOCR](https://github.com/breezedeus/cnocr)** ,其他语言的识别使用的是开源 OCR 工具 **[EasyOCR](https://github.com/JaidedAI/EasyOCR)** ,感谢相关的作者们。
|
| 114 |
+
|
| 115 |
+
支持的**语言列表**和**语言代码**如下:
|
| 116 |
+
<details>
|
| 117 |
+
<summary>↓↓↓ Click to show details ↓↓↓</summary>
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
| Language | Code Name |
|
| 121 |
+
| ------------------- | ----------- |
|
| 122 |
+
| Abaza | abq |
|
| 123 |
+
| Adyghe | ady |
|
| 124 |
+
| Afrikaans | af |
|
| 125 |
+
| Angika | ang |
|
| 126 |
+
| Arabic | ar |
|
| 127 |
+
| Assamese | as |
|
| 128 |
+
| Avar | ava |
|
| 129 |
+
| Azerbaijani | az |
|
| 130 |
+
| Belarusian | be |
|
| 131 |
+
| Bulgarian | bg |
|
| 132 |
+
| Bihari | bh |
|
| 133 |
+
| Bhojpuri | bho |
|
| 134 |
+
| Bengali | bn |
|
| 135 |
+
| Bosnian | bs |
|
| 136 |
+
| Simplified Chinese | ch_sim |
|
| 137 |
+
| Traditional Chinese | ch_tra |
|
| 138 |
+
| Chechen | che |
|
| 139 |
+
| Czech | cs |
|
| 140 |
+
| Welsh | cy |
|
| 141 |
+
| Danish | da |
|
| 142 |
+
| Dargwa | dar |
|
| 143 |
+
| German | de |
|
| 144 |
+
| English | en |
|
| 145 |
+
| Spanish | es |
|
| 146 |
+
| Estonian | et |
|
| 147 |
+
| Persian (Farsi) | fa |
|
| 148 |
+
| French | fr |
|
| 149 |
+
| Irish | ga |
|
| 150 |
+
| Goan Konkani | gom |
|
| 151 |
+
| Hindi | hi |
|
| 152 |
+
| Croatian | hr |
|
| 153 |
+
| Hungarian | hu |
|
| 154 |
+
| Indonesian | id |
|
| 155 |
+
| Ingush | inh |
|
| 156 |
+
| Icelandic | is |
|
| 157 |
+
| Italian | it |
|
| 158 |
+
| Japanese | ja |
|
| 159 |
+
| Kabardian | kbd |
|
| 160 |
+
| Kannada | kn |
|
| 161 |
+
| Korean | ko |
|
| 162 |
+
| Kurdish | ku |
|
| 163 |
+
| Latin | la |
|
| 164 |
+
| Lak | lbe |
|
| 165 |
+
| Lezghian | lez |
|
| 166 |
+
| Lithuanian | lt |
|
| 167 |
+
| Latvian | lv |
|
| 168 |
+
| Magahi | mah |
|
| 169 |
+
| Maithili | mai |
|
| 170 |
+
| Maori | mi |
|
| 171 |
+
| Mongolian | mn |
|
| 172 |
+
| Marathi | mr |
|
| 173 |
+
| Malay | ms |
|
| 174 |
+
| Maltese | mt |
|
| 175 |
+
| Nepali | ne |
|
| 176 |
+
| Newari | new |
|
| 177 |
+
| Dutch | nl |
|
| 178 |
+
| Norwegian | no |
|
| 179 |
+
| Occitan | oc |
|
| 180 |
+
| Pali | pi |
|
| 181 |
+
| Polish | pl |
|
| 182 |
+
| Portuguese | pt |
|
| 183 |
+
| Romanian | ro |
|
| 184 |
+
| Russian | ru |
|
| 185 |
+
| Serbian (cyrillic) | rs_cyrillic |
|
| 186 |
+
| Serbian (latin) | rs_latin |
|
| 187 |
+
| Nagpuri | sck |
|
| 188 |
+
| Slovak | sk |
|
| 189 |
+
| Slovenian | sl |
|
| 190 |
+
| Albanian | sq |
|
| 191 |
+
| Swedish | sv |
|
| 192 |
+
| Swahili | sw |
|
| 193 |
+
| Tamil | ta |
|
| 194 |
+
| Tabassaran | tab |
|
| 195 |
+
| Telugu | te |
|
| 196 |
+
| Thai | th |
|
| 197 |
+
| Tajik | tjk |
|
| 198 |
+
| Tagalog | tl |
|
| 199 |
+
| Turkish | tr |
|
| 200 |
+
| Uyghur | ug |
|
| 201 |
+
| Ukrainian | uk |
|
| 202 |
+
| Urdu | ur |
|
| 203 |
+
| Uzbek | uz |
|
| 204 |
+
| Vietnamese | vi |
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) .
|
| 208 |
+
|
| 209 |
+
</details>
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
## P2T 网页版
|
| 214 |
+
|
| 215 |
+
所有人都可以免费使用 **[P2T网页版](https://p2t.breezedeus.com)**,每人每天可以免费识别 10000 个字符,正常使用应该够用了。*请不要批量调用接口,机器资源有限,批量调用会导致其他人无法使用服务。*
|
| 216 |
+
|
| 217 |
+
受限于机器资源,网页版当前只支持**简体中文和英文**,要尝试其他语言上的效果,请使用以下的**在线 Demo**。
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
## 在线 Demo 🤗
|
| 222 |
+
|
| 223 |
+
也可以使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内镜像](https://hf.qhduan.com/spaces/breezedeus/Pix2Text-Demo)) 尝试 **P2T** 在不同语言上的效果。但在线 Demo 使用的硬件配置较低,速度会较慢。如果是简体中文或者英文图片,建议使用 **[P2T网页版](https://p2t.breezedeus.com)**。
|
| 224 |
+
|
| 225 |
+
## 示例
|
| 226 |
+
|
| 227 |
+
参见:[Pix2Text在线文档/示例](https://pix2text.readthedocs.io/zh-cn/stable/examples/)。
|
| 228 |
+
|
| 229 |
+
## 使用说明
|
| 230 |
+
|
| 231 |
+
参见:[Pix2Text在线文档/使用说明](https://pix2text.readthedocs.io/zh-cn/stable/usage/)。
|
| 232 |
+
|
| 233 |
+
## 模型下载
|
| 234 |
+
|
| 235 |
+
参见:[Pix2Text在线文档/模型](https://pix2text.readthedocs.io/zh-cn/stable/models/)。
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
## 安装
|
| 240 |
+
|
| 241 |
+
嗯,顺利的话一行命令即可。
|
| 242 |
+
|
| 243 |
+
```bash
|
| 244 |
+
pip install pix2text
|
| 245 |
+
```
|
| 246 |
+
|
| 247 |
+
如果需要识别**英文**与**简体中文**之外的文字,请使用以下命令安装额外的包:
|
| 248 |
+
|
| 249 |
+
```bash
|
| 250 |
+
pip install pix2text[multilingual]
|
| 251 |
+
```
|
| 252 |
+
|
| 253 |
+
安装速度慢的话,可以指定国内的安装源,如使用阿里云的安装源:
|
| 254 |
+
|
| 255 |
+
```bash
|
| 256 |
+
pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
|
| 257 |
+
```
|
| 258 |
+
|
| 259 |
+
<br/>
|
| 260 |
+
|
| 261 |
+
更多说明参见:[Pix2Text在线文档/安装](https://pix2text.readthedocs.io/zh-cn/stable/install/)。
|
| 262 |
+
|
| 263 |
+
## 命令行工具
|
| 264 |
+
|
| 265 |
+
参见:[Pix2Text在线文档/命令行工具](https://pix2text.readthedocs.io/zh-cn/stable/command/)。
|
| 266 |
+
|
| 267 |
+
## HTTP 服务
|
| 268 |
+
|
| 269 |
+
参见:[Pix2Text在线文档/命令行工具/开启服务](https://pix2text.readthedocs.io/zh-cn/stable/command/)。
|
| 270 |
+
|
| 271 |
+
## Mac 桌面客户端
|
| 272 |
+
|
| 273 |
+
请参考 [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) 安装 Pix2Text 的 MacOS 桌面客户端。
|
| 274 |
+
|
| 275 |
+
<div align="center">
|
| 276 |
+
<img src="https://github.com/breezedeus/Pix2Text-Mac/raw/main/assets/on_menu_bar.jpg" alt="Pix2Text Mac 客户端" width="400px"/>
|
| 277 |
+
</div>
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
## 给作者来杯咖啡
|
| 281 |
+
|
| 282 |
+
开源不易,如果此项目对您有帮助,可以考虑 [给作者加点油🥤,鼓鼓气💪🏻](https://www.breezedeus.com/article/buy-me-coffee) 。
|
| 283 |
+
|
| 284 |
+
---
|
| 285 |
+
|
| 286 |
+
官方代码库:[https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text) 。
|
| 287 |
+
|
| 288 |
+
Pix2Text (P2T) 更多信息:[https://www.breezedeus.com/article/pix2text_cn](https://www.breezedeus.com/article/pix2text_cn) 。
|
docs/RELEASE.md
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Release Notes
|
| 2 |
+
|
| 3 |
+
# Update 2025.07.25: **V1.1.4** Released
|
| 4 |
+
|
| 5 |
+
Major Changes:
|
| 6 |
+
|
| 7 |
+
- Upgraded the Mathematical Formula Detection (MFD) and Mathematical Formula Recognition (MFR) models to version 1.5. All default configurations, documentation, and examples now use `mfd-1.5` and `mfr-1.5` as the standard models.
|
| 8 |
+
|
| 9 |
+
主要变更:
|
| 10 |
+
|
| 11 |
+
- 数学公式检测(MFD)和数学公式识别(MFR)模型升级到 1.5 版本,所有默认配置、文档和示例均以 `mfd-1.5` 和 `mfr-1.5` 为标准模型。
|
| 12 |
+
|
| 13 |
+
# Update 2025.05.06: **V1.1.3.2** Released
|
| 14 |
+
|
| 15 |
+
Major Changes:
|
| 16 |
+
|
| 17 |
+
- Fixed a potential error when processing transparent images, see [#171](https://github.com/breezedeus/Pix2Text/issues/171) for details.
|
| 18 |
+
|
| 19 |
+
主要变更:
|
| 20 |
+
|
| 21 |
+
- 修复了处理透明图片时可能出现的错误,具体见 [#171](https://github.com/breezedeus/Pix2Text/issues/171) 。
|
| 22 |
+
|
| 23 |
+
# Update 2025.04.27: **V1.1.3.1** Released
|
| 24 |
+
|
| 25 |
+
Major Changes:
|
| 26 |
+
|
| 27 |
+
- Bugfix: Fixed the issue of model import related to VLM.
|
| 28 |
+
|
| 29 |
+
主要变更:
|
| 30 |
+
|
| 31 |
+
- 修复了 VLM 相关的模型导入问题。
|
| 32 |
+
|
| 33 |
+
# Update 2025.04.15: **V1.1.3** Released
|
| 34 |
+
|
| 35 |
+
Major Changes:
|
| 36 |
+
|
| 37 |
+
- Support for `VlmTableOCR` and `VlmTextFormulaOCR` models based on the VLM interface (see [LiteLLM documentation](https://docs.litellm.ai/docs/)) allowing the use of closed-source VLM models. Installation command: `pip install pix2text[vlm]`.
|
| 38 |
+
- Usage examples can be found in [tests/test_vlm.py](tests/test_vlm.py) and [tests/test_pix2text.py](tests/test_pix2text.py).
|
| 39 |
+
|
| 40 |
+
主要变更:
|
| 41 |
+
|
| 42 |
+
- 支持基于 VLM 接口(具体参考 [LiteLLM 文档](https://docs.litellm.ai/docs/))的 `VlmTableOCR` 和 `VlmTextFormulaOCR` 模型,可使用闭源 VLM 模型。安装命令:`pip install pix2text[vlm]`。
|
| 43 |
+
- 使用方式见 [tests/test_vlm.py](tests/test_vlm.py) 和 [tests/test_pix2text.py](tests/test_pix2text.py)。
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# Update 2024.12.17: **V1.1.2.3** Released
|
| 47 |
+
|
| 48 |
+
Major Changes:
|
| 49 |
+
|
| 50 |
+
- Bugfix: Fixed issues related to downloading models on Windows.
|
| 51 |
+
|
| 52 |
+
主要变更:
|
| 53 |
+
|
| 54 |
+
- 修复了在 Windows 环境下下载模型的问题。
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Update 2024.12.11: **V1.1.2.2** Released
|
| 58 |
+
|
| 59 |
+
Major Changes:
|
| 60 |
+
|
| 61 |
+
- Bugfix: Resolved issues related to serialization errors when handling ONNX Runtime session options by ensuring that non-serializable configurations are managed appropriately.
|
| 62 |
+
|
| 63 |
+
主要变更:
|
| 64 |
+
|
| 65 |
+
- 修复了与 ONNX Runtime session options 相关的序列化错误,通过确保不可序列化的配置信息在适当的管理下进行处理。
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# Update 2024.12.02: **V1.1.2.1** Released
|
| 69 |
+
|
| 70 |
+
Major Changes:
|
| 71 |
+
|
| 72 |
+
* Fixed an error in `fetch_column_info()@DocYoloLayoutParser`, thanks to Bin.
|
| 73 |
+
|
| 74 |
+
主要变更:
|
| 75 |
+
|
| 76 |
+
* 修复了 fetch_column_info()@DocYoloLayoutParser 中的错误,感谢网友 Bin 。
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Update 2024.11.17: **V1.1.2** Released
|
| 80 |
+
|
| 81 |
+
Major Changes:
|
| 82 |
+
|
| 83 |
+
* A new layout analysis model [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO) has been integrated, improving the accuracy of layout analysis.
|
| 84 |
+
* Bug fixes:
|
| 85 |
+
* When the text language is set to English only, a dedicated English OCR model is used to avoid including Chinese in the output.
|
| 86 |
+
* The processing logic for PNG images has been optimized, enhancing recognition performance.
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
主要变更:
|
| 90 |
+
|
| 91 |
+
* 版面分析模型加入 [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO),提升版面分析的准确性。
|
| 92 |
+
* 修复 bugs:
|
| 93 |
+
* 在设置文本语言只有英语时,使用专门的英文 OCR 模型,避免输出中包含中文。
|
| 94 |
+
* 对 PNG 图片的处理逻辑进行了优化,提升了识别效果。
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# Update 2024.07.18: **V1.1.1.2** Released
|
| 98 |
+
|
| 99 |
+
Major Changes:
|
| 100 |
+
|
| 101 |
+
* fix bugs:
|
| 102 |
+
* https://github.com/breezedeus/Pix2Text/issues/129
|
| 103 |
+
* https://github.com/breezedeus/Pix2Text/issues/116
|
| 104 |
+
|
| 105 |
+
主要变更:
|
| 106 |
+
|
| 107 |
+
* 修复 bugs:
|
| 108 |
+
* https://github.com/breezedeus/Pix2Text/issues/129
|
| 109 |
+
* https://github.com/breezedeus/Pix2Text/issues/116
|
| 110 |
+
|
| 111 |
+
# Update 2024.06.24: **V1.1.1.1** Released
|
| 112 |
+
|
| 113 |
+
Major Changes:
|
| 114 |
+
|
| 115 |
+
* Added a new parameter `static_resized_shape` when initializing `MathFormulaDetector`, which is used to resize the input image to a fixed size. Some formats of models require fixed-size input images during inference, such as `CoreML`.
|
| 116 |
+
|
| 117 |
+
主要变更:
|
| 118 |
+
|
| 119 |
+
* `MathFormulaDetector` 初始化时加入了参数 `static_resized_shape`, 用于把输入图片 resize 为固定大小。某些格式的模型在推理时需要固定大小的输入图片,如 `CoreML`。
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
## Update 2024.06.18: **V1.1.1** Released
|
| 123 |
+
|
| 124 |
+
Major changes:
|
| 125 |
+
|
| 126 |
+
* Support the new mathematical formula detection models (MFD): [breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd)), which significantly improves the accuracy of formula detection.
|
| 127 |
+
|
| 128 |
+
See details: [Pix2Text V1.1.1 Released, Bringing Better Mathematical Formula Detection Models | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1).
|
| 129 |
+
|
| 130 |
+
主要变更:
|
| 131 |
+
|
| 132 |
+
* 支持新的数学公式检测模型(MFD):[breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd)),公式检测精度获得较大提升。
|
| 133 |
+
|
| 134 |
+
具体说明请见:[Pix2Text V1.1.1 发布,带来更好的数学公式检测模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1)。
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
## Update 2024.06.17:**V1.1.0.7** Released
|
| 138 |
+
|
| 139 |
+
Major changes:
|
| 140 |
+
|
| 141 |
+
* adapted with cnstd>=1.2.4, thanks to [@g1y5x3](https://github.com/g1y5x3) .
|
| 142 |
+
|
| 143 |
+
主要变更:
|
| 144 |
+
|
| 145 |
+
* 适配 cnstd>=1.2.4 ,感谢 [@g1y5x3](https://github.com/g1y5x3) 。
|
| 146 |
+
|
| 147 |
+
## Update 2024.06.04:**V1.1.0.6** Released
|
| 148 |
+
|
| 149 |
+
Major changes:
|
| 150 |
+
|
| 151 |
+
* Fix: The Text OCR incorrectly carried over the configuration from previous calls when it was called multiple times.
|
| 152 |
+
|
| 153 |
+
主要变更:
|
| 154 |
+
|
| 155 |
+
* 修复 bug:Text OCR 多次调用时错误沿用了之前的配置信息。
|
| 156 |
+
|
| 157 |
+
## Update 2024.05.27:**V1.1.0.5** Released
|
| 158 |
+
|
| 159 |
+
Major changes:
|
| 160 |
+
|
| 161 |
+
* Fixed bugs such as that in `._parse_remaining`.
|
| 162 |
+
|
| 163 |
+
主要变更:
|
| 164 |
+
|
| 165 |
+
* 修复 `._parse_remaining` 等 bug。
|
| 166 |
+
|
| 167 |
+
## Update 2024.05.20:**V1.1.0.4** Released
|
| 168 |
+
|
| 169 |
+
Major changes:
|
| 170 |
+
|
| 171 |
+
* set `table_as_image` as `True` if `self.table_ocr` is not available.
|
| 172 |
+
* fix typo: https://github.com/breezedeus/Pix2Text/pull/108 . Thanks to [@billvsme](https://github.com/billvsme).
|
| 173 |
+
|
| 174 |
+
主要变更:
|
| 175 |
+
|
| 176 |
+
* 如果 `self.table_ocr` 不可用,将 `table_as_image` 设置为 `True`。
|
| 177 |
+
* 修复拼写错误:https://github.com/breezedeus/Pix2Text/pull/108 。感谢 [@billvsme](https://github.com/billvsme)。
|
| 178 |
+
|
| 179 |
+
## Update 2024.05.19:**V1.1.0.3** Released
|
| 180 |
+
|
| 181 |
+
Major changes:
|
| 182 |
+
|
| 183 |
+
* A new paid model, `mfr-plus`, has been added, which offers better recognition for multi-line formulas.
|
| 184 |
+
* When recognizing only English, CnOCR does not output Chinese.
|
| 185 |
+
* Bugs have been fixed.
|
| 186 |
+
|
| 187 |
+
主要变更:
|
| 188 |
+
|
| 189 |
+
* 加入新的付费模型:`mfr-plus`,对多行公式的识别效果更好。
|
| 190 |
+
* 在只识别英文时,CnOCR 不输出中文。
|
| 191 |
+
* 修复 bugs。
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
## Update 2024.05.10:**V1.1.0.2** Released
|
| 195 |
+
|
| 196 |
+
Major changes:
|
| 197 |
+
|
| 198 |
+
* Fixed the error caused by empty lines in `merge_line_texts`.
|
| 199 |
+
|
| 200 |
+
主要变更:
|
| 201 |
+
|
| 202 |
+
* 修复 `merge_line_texts` 中空行导致的错误。
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
## Update 2024.04.30:**V1.1.0.1** Released
|
| 206 |
+
|
| 207 |
+
Major changes:
|
| 208 |
+
|
| 209 |
+
* Fix the exception occurring when saving files on Windows.
|
| 210 |
+
|
| 211 |
+
主要变更:
|
| 212 |
+
|
| 213 |
+
* 修复 Windows 下存储文件时出现的异常。
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
## Update 2024.04.28:**V1.1** Released
|
| 217 |
+
|
| 218 |
+
Major changes:
|
| 219 |
+
|
| 220 |
+
* Added layout analysis and table recognition models, supporting the conversion of images with complex layouts into Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples_en/).
|
| 221 |
+
* Added support for converting entire PDF files to Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples_en/).
|
| 222 |
+
* Enhanced the interface with more features, including adjustments to existing interface parameters.
|
| 223 |
+
* Launched the [Pix2Text Online Documentation](https://pix2text.readthedocs.io).
|
| 224 |
+
|
| 225 |
+
主要变更:
|
| 226 |
+
|
| 227 |
+
* 加入了版面分析和表格识别模型,支持把复杂排版的图片转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples/)。
|
| 228 |
+
* 支持把整个 PDF 文件转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples/)。
|
| 229 |
+
* 加入了更丰富的接口,已有接口的参数也有所调整。
|
| 230 |
+
* 上线了 [Pix2Text 在线文档](https://pix2text.readthedocs.io)。
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
## Update 2024.03.30:**V1.0.2.3** Released
|
| 234 |
+
|
| 235 |
+
Major changes:
|
| 236 |
+
|
| 237 |
+
* Fixed the issue caused by `merge_line_texts`, see details at: https://github.com/breezedeus/Pix2Text/issues/84.
|
| 238 |
+
* Optimized the post-processing logic to handle some abnormal sequences.
|
| 239 |
+
|
| 240 |
+
主要变更:
|
| 241 |
+
|
| 242 |
+
* 修复 `merge_line_texts` 带来的错误,具体见:https://github.com/breezedeus/Pix2Text/issues/84 。
|
| 243 |
+
* 优化了后处理逻辑,处理部分不正常的序列。
|
| 244 |
+
|
| 245 |
+
## Update 2024.03.18:**V1.0.2.2** Released
|
| 246 |
+
|
| 247 |
+
Major changes:
|
| 248 |
+
|
| 249 |
+
* The previously used `output_logits` argument is incompatible with transformers < 4.38.0, replaced by the `output_scores` argument. https://github.com/breezedeus/Pix2Text/issues/81
|
| 250 |
+
* Fixed a bug in `serve.py` that was not compatible with the new pix2text version.
|
| 251 |
+
|
| 252 |
+
主要变更:
|
| 253 |
+
|
| 254 |
+
* 之前使用的 `output_logits` 参数不兼容 transformers < 4.38.0,换为 `output_scores` 参数。 https://github.com/breezedeus/Pix2Text/issues/81
|
| 255 |
+
* 修复 `serve.py` 中未兼容新版接口的 bug。
|
| 256 |
+
|
| 257 |
+
## Update 2024.03.15:**V1.0.2.1** Released
|
| 258 |
+
|
| 259 |
+
Major Changes:
|
| 260 |
+
|
| 261 |
+
* Fixed mishandling of LaTeX expressions during post-processing, such as replacing `\rightarrow` with `arrow`.
|
| 262 |
+
* Added `rec_config` parameter to `.recognize_text()` and `.recognize_formula()` methods for passing additional parameters for recognition.
|
| 263 |
+
|
| 264 |
+
主要变更:
|
| 265 |
+
|
| 266 |
+
* 修复对 LaTeX 表达式进行后处理时引入的误操作,如 `\rightarrow` 被替换为 `arrow`。
|
| 267 |
+
* 对 `.recognize_text()` 和 `.recognize_formula()` 加入了 `rec_config` 参数,以便传入用于识别的额外参数。
|
| 268 |
+
|
| 269 |
+
## Update 2024.03.14:**V1.0.2** Released
|
| 270 |
+
|
| 271 |
+
Major Changes:
|
| 272 |
+
|
| 273 |
+
* Optimized the recognition process, improving the recognition of boundary punctuation that may have been missed before.
|
| 274 |
+
* Enhanced the LaTeX recognition results by restoring the formula tags to the formulas.
|
| 275 |
+
* Adjusted the output format of the recognition results, adding the `return_text` parameter to control whether to return only text or more detailed information. When returning more detailed information, confidence score `score` and position information `position` will also be provided. Thanks to [@hiroi-sora](https://github.com/hiroi-sora) for the suggestion: https://github.com/breezedeus/Pix2Text/issues/67.
|
| 276 |
+
|
| 277 |
+
主要变更:
|
| 278 |
+
|
| 279 |
+
* 优化了识别的逻辑,以前可能漏识的边界标点现在可以比较好的识别。
|
| 280 |
+
* 对 Latex 识别结果进行了优化,把公式的 tag 还原到公式中。
|
| 281 |
+
* 调整了识别结果的输出格式,增加了参数 `return_text` 来控制结果是只返回文本还是更丰富的信息。当返回更丰富信息时,会返回置信度 `score` 以及位置信息 `position`。感谢 [@hiroi-sora](https://github.com/hiroi-sora) 的建议:https://github.com/breezedeus/Pix2Text/issues/67 。
|
| 282 |
+
|
| 283 |
+
## Update 2024.03.03:发布 **V1.0.1**
|
| 284 |
+
|
| 285 |
+
主要变更:
|
| 286 |
+
|
| 287 |
+
* 修复在 CUDA 环境下使用 `LatexOCR` 时出现的错误,具体见:https://github.com/breezedeus/Pix2Text/issues/65#issuecomment-1973037910 ,感谢 [@MSZ-006NOC](https://github.com/MSZ-006NOC)。
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
## Update 2024.02.26:发布 **V1.0**
|
| 291 |
+
|
| 292 |
+
主要变更:
|
| 293 |
+
|
| 294 |
+
* 数学公式识别(MFR)模型使用新架构,在新的数据集上训练,获得了 SOTA 的精度。具体说明请见:[Pix2Text V1.0 新版发布:最好的开源公式识别模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-v1.0)。
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
## Update 2024.01.10:发布 **V0.3**
|
| 298 |
+
|
| 299 |
+
主要变更:
|
| 300 |
+
|
| 301 |
+
* 支持识别 **`80+` 种语言**,详细语言列表见 [支持的语言列表](./README_cn.md#支持的语言列表);
|
| 302 |
+
|
| 303 |
+
* 模型自动下载增加国内站点;
|
| 304 |
+
|
| 305 |
+
* 优化对检测 boxes 的合并逻辑。
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
## Update 2023.12.21:发布 **V0.2.3.3**
|
| 310 |
+
|
| 311 |
+
主要变更:
|
| 312 |
+
|
| 313 |
+
* fix: bugfixed from [@hiroi-sora](https://github.com/hiroi-sora) , thanks much.
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
## Update 2023.09.10:发布 **V0.2.3.2**
|
| 318 |
+
|
| 319 |
+
主要变更:
|
| 320 |
+
* fix: 去掉 `consts.py` 无用的 `CATEGORY_MAPPINGS`。
|
| 321 |
+
|
| 322 |
+
## Update 2023.07.14:发布 **V0.2.3.1**
|
| 323 |
+
|
| 324 |
+
主要变更:
|
| 325 |
+
* 修复了 `self.recognize_by_clf` 返回结果中不包含 `line_number` 字段导致 `merge_line_texts` 报错的bug。
|
| 326 |
+
|
| 327 |
+
## Update 2023.07.03:发布 **V0.2.3**
|
| 328 |
+
|
| 329 |
+
主要变更:
|
| 330 |
+
* 优化了对检测出的boxes的排序逻辑,以及对混合图片的处理逻辑,使得最终识别效果更符合直觉。具体参考:[Pix2Text 新版公式识别模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-20230702) 。
|
| 331 |
+
* 修复了模型文件自动下载的功能。HuggingFace似乎对下载文件的逻辑做了调整,导致之前版本的自动下载失败,当前版本已修复。但由于HuggingFace国内被墙,国内下载仍需 **梯子(VPN)**。
|
| 332 |
+
* 更新了各个依赖包的版本号。
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
## Update 2023.06.20:发布新版 MFD 模型
|
| 336 |
+
|
| 337 |
+
主要变更:
|
| 338 |
+
* 基于新标注的数据,重新训练了 **MFD YoloV7** 模型,目前新模型已部署到 [P2T网页版](https://p2t.breezedeus.com) 。具体说明见:[Pix2Text (P2T) 新版公式检测模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-20230613) 。
|
| 339 |
+
* 之前的 MFD YoloV7 模型已开放给星球会员下载,具体说明见:[P2T YoloV7 数学公式检测模型开放给星球会员下载 | Breezedeus.com](https://www.breezedeus.com/article/p2t-yolov7-for-zsxq-20230619) 。
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
## Update 2023.02.19:发布 **V0.2.2.1**
|
| 343 |
+
|
| 344 |
+
主要变更:
|
| 345 |
+
* 修复bug。
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
## Update 2023.02.19:发布 **V0.2.2**
|
| 349 |
+
|
| 350 |
+
主要变更:
|
| 351 |
+
* 修复旋转框导致的识别结果错误;
|
| 352 |
+
* 去掉代码中不小心包含的 `breakpoint()`。
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
## [Yanked] Update 2023.02.19:发布 **V0.2.1**
|
| 356 |
+
|
| 357 |
+
主要变更:
|
| 358 |
+
* 增加后处理机制优化Latex-OCR的识别结果;
|
| 359 |
+
* 使用最新的 [CnSTD](https://github.com/breezedeus/cnstd) 和 [CnOCR](https://github.com/breezedeus/cnocr),它们修复了一些bug。
|
| 360 |
+
|
| 361 |
+
## Update 2023.02.03:发布 **V0.2**
|
| 362 |
+
|
| 363 |
+
主要变更:
|
| 364 |
+
* 利用 **[CnSTD](https://github.com/breezedeus/cnstd)** 新版的**数学公式检测**(**Mathematical Formula Detection**,简称 **MFD**)能力,**P2T V0.2** 支持**识别既包含文字又包含公式的混合图片**。
|
| 365 |
+
|
| 366 |
+
## Update 2022.10.21:发布 V0.1.1
|
| 367 |
+
|
| 368 |
+
主要变更:
|
| 369 |
+
* Fix: remove the character which causes error on Windows
|
| 370 |
+
|
| 371 |
+
## Update 2022.09.11:发布 V0.1
|
| 372 |
+
* 初版发布
|
docs/buymeacoffee.md
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 给作者加油 (Sponsor the Author)🥤
|
| 2 |
+
|
| 3 |
+
虽然AI技术偶尔被用于作恶,但我更相信它能给人类和其他生命带来温暖。这是我创建和持续优化这些开源项目的最大动力。它们不是为了展示技术的强大,而是为了给有需要的人带来方便和帮助。通过对这些项目的捐赠,您可以和我一道让AI为更多人带来温暖和美好。
|
| 4 |
+
|
| 5 |
+
My unwavering love for artificial intelligence technology drives me to constantly seek new challenges and opportunities. This is why I have created these open-sourced projects, which aim not just to demonstrate technical prowess, but more importantly, to bring convenience and help to those who need it. I truly believe that these projects have the power to change lives for the better. Seeing the positive impact of my work fills me with a sense of happiness and pride that fuels my drive to continue creating and innovating.
|
| 6 |
+
|
| 7 |
+
By supporting my projects through a donation, you can be a part of this journey and help me bring more warmth and humanity to the world of AI.
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
## 1. 知识星球
|
| 11 |
+
|
| 12 |
+
欢迎加入**知识星球** **[P2T/CnOCR/CnSTD私享群](https://t.zsxq.com/FEYZRJQ)**。**知识星球私享群**会陆续发布一些 CnOCR/CnSTD/P2T 相关的私有资料。
|
| 13 |
+
关于星球会员享受福利的更详细说明请参考:[知识星球 | Breezedeus.com](https://www.breezedeus.com/article/zsxq)。
|
| 14 |
+
|
| 15 |
+
<figure markdown>
|
| 16 |
+
{: style="width:280px"}
|
| 17 |
+
</figure>
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
## 2. 支付宝打赏 (Alipay reward)
|
| 21 |
+
|
| 22 |
+
通过**支付宝**给作者打赏。
|
| 23 |
+
Give the author a reward through Alipay.
|
| 24 |
+
|
| 25 |
+
<figure markdown>
|
| 26 |
+
{: style="width:280px"}
|
| 27 |
+
</figure>
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
## 3. Buy me a Coffee
|
| 31 |
+
If you are not in mainland China, you can also support the author through:
|
| 32 |
+
|
| 33 |
+
<div align="center">
|
| 34 |
+
<a href="https://www.buymeacoffee.com/breezedeus2" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>
|
| 35 |
+
</div>
|
docs/command.md
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 脚本工具
|
| 2 |
+
|
| 3 |
+
Python 包 **pix2text** 自带了命令行工具 `p2t`,[安装](install.md) 后即可使用。`p2t` 包含了以下几个子命令。
|
| 4 |
+
|
| 5 |
+
## 预测
|
| 6 |
+
|
| 7 |
+
使用命令 **`p2t predict`** 预测单个(图片或 PDF)文件或文件夹中所有图片(不支持同时预测多个 PDF 文件),以下是使用说明:
|
| 8 |
+
|
| 9 |
+
```bash
|
| 10 |
+
$ p2t predict -h
|
| 11 |
+
Usage: p2t predict [OPTIONS]
|
| 12 |
+
|
| 13 |
+
使用Pix2Text(P2T)来预测图像或 PDF 文件中的文本信息
|
| 14 |
+
|
| 15 |
+
选项:
|
| 16 |
+
-l,--languages TEXT Text-OCR识别的语言代码,用逗号分隔,默认为en,ch_sim
|
| 17 |
+
--layout-config TEXT 布局解析器模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 18 |
+
--mfd-config TEXT MFD模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 19 |
+
--formula-ocr-config TEXT Latex-OCR数学公式识别模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 20 |
+
--text-ocr-config TEXT Text-OCR识别的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 21 |
+
--enable-formula / --disable-formula
|
| 22 |
+
是否启用公式识别,默认值:启用公式
|
| 23 |
+
--enable-table / --disable-table
|
| 24 |
+
是否启用表格识别,默认值:启用表格
|
| 25 |
+
-d, --device TEXT 选择使用`cpu`、`gpu`或指定的GPU,如`cuda:0`。默认值:cpu
|
| 26 |
+
--file-type [pdf|page|text_formula|formula|text]
|
| 27 |
+
要处理的文件类型,'pdf'、'page'、'text_formula'、'formula'或'text'。默认值:text_formula
|
| 28 |
+
--resized-shape INTEGER 在处理之前将图像宽度调整为此大小。默认值:768
|
| 29 |
+
-i, --img-file-or-dir TEXT 输入图像/pdf的文件路径或指定的目录。[必需]
|
| 30 |
+
--save-debug-res TEXT 如果设置了`save_debug_res`,则保存调试结果的目录;默认值为`None`,表示不保存
|
| 31 |
+
--rec-kwargs TEXT 用于调用`.recognize()`的kwargs,以JSON字符串格式提供
|
| 32 |
+
--return-text / --no-return-text
|
| 33 |
+
是否仅返回文本结果,默认值:返回文本
|
| 34 |
+
--auto-line-break / --no-auto-line-break
|
| 35 |
+
是否自动确定是否将相邻的行结果合并为单个行结果,默认值:自动换行
|
| 36 |
+
-o, --output-dir TEXT 识别文本结果的输出目录。仅在`file-type`为`pdf`或`page`时有效。默认值:output-md
|
| 37 |
+
--log-level TEXT 日志级别,例如`INFO`、`DEBUG`。默认值:INFO
|
| 38 |
+
-h, --help 显示此消息并退出。
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
### 示例 1
|
| 42 |
+
使用基础模型进行预测:
|
| 43 |
+
|
| 44 |
+
```bash
|
| 45 |
+
p2t predict -l en,ch_sim --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
它会把识别结果(Markdown格式)存放在 `output-md` 目录下,并把中间的解析结果存放在 `output-debug` 目录下,以便分析识别结果主要受哪个模型的影响。
|
| 49 |
+
如果不需要保存中间解析结果,可以去掉 `--save-debug-res output-debug` 参数。
|
| 50 |
+
|
| 51 |
+
### 示例 2
|
| 52 |
+
|
| 53 |
+
预测时也支持使用自定义的参数或模型。例如,使用自定义的模型进行预测:
|
| 54 |
+
|
| 55 |
+
```bash
|
| 56 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --rec-kwargs '{"page_numbers": [0, 1]}' --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
## 开启服务
|
| 61 |
+
|
| 62 |
+
使用命令 **`p2t serve`** 开启一个 HTTP 服务,用于接收图片(当前不支持 PDF)并返回识别结果。
|
| 63 |
+
这个 HTTP 服务是基于 FastAPI 实现的,以下是使用说明:
|
| 64 |
+
|
| 65 |
+
```bash
|
| 66 |
+
$ p2t serve -h
|
| 67 |
+
Usage: p2t serve [OPTIONS]
|
| 68 |
+
|
| 69 |
+
启动HTTP服务。
|
| 70 |
+
|
| 71 |
+
选项:
|
| 72 |
+
-l, --languages TEXT Text-OCR识别的语言代码,用逗号分隔,默认为en,ch_sim
|
| 73 |
+
--layout-config TEXT 布局解析器模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 74 |
+
--mfd-config TEXT MFD模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 75 |
+
--formula-ocr-config TEXT Latex-OCR数学公式识别模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 76 |
+
--text-ocr-config TEXT Text-OCR识别的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置
|
| 77 |
+
--enable-formula / --disable-formula
|
| 78 |
+
是否启用公式识别,默认值:启用公式
|
| 79 |
+
--enable-table / --disable-table
|
| 80 |
+
是否启用表格识别,默认值:启用表格
|
| 81 |
+
-d, --device TEXT 选择使用`cpu`、`gpu`或指定的GPU,如`cuda:0`。默认值:cpu
|
| 82 |
+
-o, --output-md-root-dir TEXT Markdown输出的根目录,用于存放识别文本结果。仅在`file-type`为`pdf`或`page`时有效。默认值:output-md-root
|
| 83 |
+
-H, --host TEXT 服务器主机 [默认值:0.0.0.0]
|
| 84 |
+
-p, --port INTEGER 服务器端口 [默认值:8503]
|
| 85 |
+
--reload 当代码发生更改时是否重新加载服务器
|
| 86 |
+
--log-level TEXT 日志级别,例如`INFO`、`DEBUG`。默认值:INFO
|
| 87 |
+
-h, --help 显示此消息并退出。
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
### 示例 1
|
| 91 |
+
使用基础模型进行预测:
|
| 92 |
+
|
| 93 |
+
```bash
|
| 94 |
+
p2t serve -l en,ch_sim -H 0.0.0.0 -p 8503
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
### 示例 2
|
| 98 |
+
|
| 99 |
+
服务开启时也支持使用自定义的参数或模型。例如,使用自定义的模型进行预测:
|
| 100 |
+
|
| 101 |
+
```bash
|
| 102 |
+
p2t serve -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' -H 0.0.0.0 -p 8503
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
### 服务调用
|
| 106 |
+
|
| 107 |
+
#### Python
|
| 108 |
+
开启后可以使用以下方式调用命令(Python):
|
| 109 |
+
|
| 110 |
+
```python
|
| 111 |
+
import requests
|
| 112 |
+
|
| 113 |
+
url = 'http://0.0.0.0:8503/pix2text'
|
| 114 |
+
|
| 115 |
+
image_fp = 'docs/examples/page2.png'
|
| 116 |
+
data = {
|
| 117 |
+
"file_type": "page",
|
| 118 |
+
"resized_shape": 768,
|
| 119 |
+
"embed_sep": " $,$ ",
|
| 120 |
+
"isolated_sep": "$$\n, \n$$"
|
| 121 |
+
}
|
| 122 |
+
files = {
|
| 123 |
+
"image": (image_fp, open(image_fp, 'rb'), 'image/jpeg')
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
r = requests.post(url, data=data, files=files)
|
| 127 |
+
|
| 128 |
+
outs = r.json()['results']
|
| 129 |
+
out_md_dir = r.json()['output_dir']
|
| 130 |
+
if isinstance(outs, str):
|
| 131 |
+
only_text = outs
|
| 132 |
+
else:
|
| 133 |
+
only_text = '\n'.join([out['text'] for out in outs])
|
| 134 |
+
print(f'{only_text=}')
|
| 135 |
+
print(f'{out_md_dir=}')
|
| 136 |
+
```
|
| 137 |
+
|
| 138 |
+
#### Curl
|
| 139 |
+
|
| 140 |
+
也可以使用 curl 调用服务:
|
| 141 |
+
|
| 142 |
+
```bash
|
| 143 |
+
curl -X POST \
|
| 144 |
+
-F "file_type=page" \
|
| 145 |
+
-F "resized_shape=768" \
|
| 146 |
+
-F "embed_sep= $,$ " \
|
| 147 |
+
-F "isolated_sep=$$\n, \n$$" \
|
| 148 |
+
-F "image=@docs/examples/page2.png;type=image/jpeg" \
|
| 149 |
+
http://0.0.0.0:8503/pix2text
|
| 150 |
+
```
|
docs/contact.md
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# 交流群
|
| 3 |
+
可通过以下方式与作者 [breezedeus](https://github.com/breezedeus) 进行沟通,也欢迎反馈使用过程中遇到的问题。
|
| 4 |
+
|
| 5 |
+
## 一、知识星球 [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ)
|
| 6 |
+
|
| 7 |
+
作者维护 **知识星球** [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) ,欢迎加入。**知识星球私享群**会陆续发布一些 P2T/CnOCR/CnSTD 相关的私有资料。
|
| 8 |
+
关于星球会员享受福利的更详细说明请参考:[知识星球 | Breezedeus.com](https://www.breezedeus.com/article/zsxq)。
|
| 9 |
+
|
| 10 |
+
<figure markdown>
|
| 11 |
+
{: style="width:280px"}
|
| 12 |
+
</figure>
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
## 二、微信交流群
|
| 16 |
+
|
| 17 |
+
扫码加小助手为好友,备注 `p2t`,小助手会定期统一邀请大家入群:
|
| 18 |
+
|
| 19 |
+
<figure markdown>
|
| 20 |
+
{: style="width:270px"}
|
| 21 |
+
</figure>
|
| 22 |
+
|
| 23 |
+
正常情况小助手会定期邀请入群,但无法保证时间。如果期望尽快得到答复,可以加入上面的知识星球 [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) 。
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
## 三、Discord
|
| 27 |
+
|
| 28 |
+
欢迎加入 [**Pix2Text Discord 服务器**](https://discord.gg/GgD87WM8Tf) 。
|
| 29 |
+
|
| 30 |
+
Welcome to join [**Pix2Text Discord Server**](https://discord.gg/GgD87WM8Tf) .
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
## 四、邮件 / Email
|
| 34 |
+
|
| 35 |
+
**邮箱**:breezedeus AT gmail.com,看的不勤,除非其他方式联系不上。
|
| 36 |
+
|
| 37 |
+
Email: breezedeus AT gmail.com .
|
docs/demo.md
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## P2T 网页版
|
| 2 |
+
|
| 3 |
+
所有人都可以免费使用 **[P2T网页版](https://p2t.breezedeus.com)**,每人每天可以免费识别 10000 个字符,正常使用应该够用了。如果无法打开,请尝试科学上网。*请不要批量调用接口,机器资源有限,批量调用会导致其他人无法使用服务。*
|
| 4 |
+
|
| 5 |
+
受限于机器资源,网页版当前只支持**简体中文和英文**,要尝试其他语言上的效果,请使用以下的**在线 Demo**。
|
| 6 |
+
|
| 7 |
+
<figure markdown>
|
| 8 |
+

|
| 9 |
+
</figure>
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
## 在线 Demo 🤗
|
| 13 |
+
|
| 14 |
+
也可以使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内镜像](https://hf.qhduan.com/spaces/breezedeus/Pix2Text-Demo)) 尝试 **P2T** 在不同语言上的效果。但在线 Demo 使用的硬件配置较低,速度会较慢。如果是简体中文或者英文图片,建议使用 **[P2T网页版](https://p2t.breezedeus.com)**。
|
| 15 |
+
|
| 16 |
+
<figure markdown>
|
| 17 |
+

|
| 18 |
+
</figure>
|
| 19 |
+
|
| 20 |
+
更多说明请参考 [Pix2Text 主页](https://www.breezedeus.com/article/pix2text_cn) 。
|
docs/examples.md
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<figure markdown>
|
| 2 |
+
|
| 3 |
+
[English](examples_en.md) | 中文
|
| 4 |
+
|
| 5 |
+
</figure>
|
| 6 |
+
|
| 7 |
+
# 示例
|
| 8 |
+
## 识别 PDF 文件,返回其 Markdown 格式
|
| 9 |
+
|
| 10 |
+
对于 PDF 文件,可以使用函数 `.recognize_pdf()` 对整个文件或者指定页进行识别,并把结果输出为 Markdown 文件。如针对以下 PDF 文件 ([examples/test-doc.pdf](examples/test-doc.pdf)),
|
| 11 |
+
调用方式如下:
|
| 12 |
+
|
| 13 |
+
```python
|
| 14 |
+
from pix2text import Pix2Text
|
| 15 |
+
|
| 16 |
+
img_fp = './examples/test-doc.pdf'
|
| 17 |
+
p2t = Pix2Text.from_config()
|
| 18 |
+
doc = p2t.recognize_pdf(img_fp, page_numbers=[0, 1])
|
| 19 |
+
doc.to_markdown('output-md') # 导出的 Markdown 信息保存在 output-md 目录中
|
| 20 |
+
```
|
| 21 |
+
|
| 22 |
+
也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFD + MFR + CnOCR 三个付费模型)进行识别:
|
| 23 |
+
|
| 24 |
+
```bash
|
| 25 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --rec-kwargs '{"page_numbers": [0, 1]}' --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
识别结果见 [output-md/output.md](output-md/output.md)。
|
| 29 |
+
|
| 30 |
+
<br/>
|
| 31 |
+
|
| 32 |
+
> 如果期望导出 Markdown 之外的其他格式,如 Word、HTML、PDF 等,推荐使用工具 [Pandoc](https://pandoc.org) 对 Markdown 结果进行转换即可。
|
| 33 |
+
|
| 34 |
+
## 识别带有复杂排版的图片
|
| 35 |
+
可以使用函数 `.recognize_page()` 识别图片中的文字和数学公式。如针对以下图片 ([examples/page2.png](examples/page2.png)):
|
| 36 |
+
|
| 37 |
+
<figure markdown>
|
| 38 |
+
{: style="width:600px"}
|
| 39 |
+
</figure>
|
| 40 |
+
|
| 41 |
+
调用方式如下:
|
| 42 |
+
|
| 43 |
+
```python
|
| 44 |
+
from pix2text import Pix2Text
|
| 45 |
+
|
| 46 |
+
img_fp = './examples/page2.png'
|
| 47 |
+
p2t = Pix2Text.from_config()
|
| 48 |
+
page = p2t.recognize_page(img_fp)
|
| 49 |
+
page.to_markdown('output-page') # 导出的 Markdown 信息保存在 output-page 目录中
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFD + MFR + CnOCR 三个付费模型)进行识别:
|
| 53 |
+
|
| 54 |
+
```bash
|
| 55 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type page -i docs/examples/page2.png -o output-page --save-debug-res output-debug-page
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
识别结果和 [output-md/output.md](output-md/output.md) 类似。
|
| 59 |
+
|
| 60 |
+
## 识别既有公式又有文本的段落图片
|
| 61 |
+
|
| 62 |
+
对于既有公式又有文本的段落图片,识别时不需要使用版面分析模型。
|
| 63 |
+
可以使用函数 `.recognize_text_formula()` 识别图片中的文字和数学公式。如针对以下图片 ([examples/en1.jpg](examples/en1.jpg)):
|
| 64 |
+
|
| 65 |
+
<figure markdown>
|
| 66 |
+
{: style="width:600px"}
|
| 67 |
+
</figure>
|
| 68 |
+
|
| 69 |
+
调用方式如下:
|
| 70 |
+
|
| 71 |
+
```python
|
| 72 |
+
from pix2text import Pix2Text, merge_line_texts
|
| 73 |
+
|
| 74 |
+
img_fp = './examples/en1.jpg'
|
| 75 |
+
p2t = Pix2Text.from_config()
|
| 76 |
+
outs = p2t.recognize_text_formula(img_fp, resized_shape=768, return_text=True)
|
| 77 |
+
print(outs)
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
返回结果 `outs` 是个 `dict`,其中 key `position` 表示Box位置信息,`type` 表示类别信息,而 `text` 表示识别的结果。具体说明见[接口说明](#接口说明)。
|
| 81 |
+
|
| 82 |
+
也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFD + MFR + CnOCR 三个付费模型)进行识别:
|
| 83 |
+
|
| 84 |
+
```bash
|
| 85 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
或者使用免费开源模型进行识别:
|
| 89 |
+
|
| 90 |
+
```bash
|
| 91 |
+
p2t predict -l en,ch_sim --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
## 识别纯公式图片
|
| 95 |
+
|
| 96 |
+
对于只包含数学公式的图片,使用函数 `.recognize_formula()` 可以把数学公式识别为 LaTeX 表达式。如针对以下图片 ([examples/math-formula-42.png](examples/math-formula-42.png)):
|
| 97 |
+
|
| 98 |
+
<figure markdown>
|
| 99 |
+
{: style="width:300px"}
|
| 100 |
+
</figure>
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
调用方式如下:
|
| 104 |
+
|
| 105 |
+
```python
|
| 106 |
+
from pix2text import Pix2Text
|
| 107 |
+
|
| 108 |
+
img_fp = './examples/math-formula-42.png'
|
| 109 |
+
p2t = Pix2Text.from_config()
|
| 110 |
+
outs = p2t.recognize_formula(img_fp)
|
| 111 |
+
print(outs)
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
返回结果为字符串,即对应的 LaTeX 表达式。具体说明见[说明](usage.md)。
|
| 115 |
+
|
| 116 |
+
也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFR 一个付费模型)进行识别:
|
| 117 |
+
|
| 118 |
+
```bash
|
| 119 |
+
p2t predict -l en,ch_sim --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --file-type formula -i docs/examples/math-formula-42.png
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
或者使用免费开源模型进行识别:
|
| 123 |
+
|
| 124 |
+
```bash
|
| 125 |
+
p2t predict -l en,ch_sim --file-type formula -i docs/examples/math-formula-42.png
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
## 识别纯文字图片
|
| 129 |
+
|
| 130 |
+
对于只包含文字不包含数学公式的图片,使用函数 `.recognize_text()` 可以识别出图片中的文字。此时 Pix2Text 相当于一般的文字 OCR 引擎。如针对以下图片 ([examples/general.jpg](examples/general.jpg)):
|
| 131 |
+
|
| 132 |
+
<figure markdown>
|
| 133 |
+
{: style="width:400px"}
|
| 134 |
+
</figure>
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
调用方式如下:
|
| 138 |
+
|
| 139 |
+
```python
|
| 140 |
+
from pix2text import Pix2Text
|
| 141 |
+
|
| 142 |
+
img_fp = './examples/general.jpg'
|
| 143 |
+
p2t = Pix2Text.from_config()
|
| 144 |
+
outs = p2t.recognize_text(img_fp)
|
| 145 |
+
print(outs)
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
返回结果为字符串,即对应的文字序列。具体说明见[接口说明](https://pix2text.readthedocs.io/zh-cn/latest/pix2text/pix_to_text/)。
|
| 149 |
+
|
| 150 |
+
也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(CnOCR 一个付费模型)进行识别:
|
| 151 |
+
|
| 152 |
+
```bash
|
| 153 |
+
p2t predict -l en,ch_sim --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
或者使用免费开源模型进行识别:
|
| 157 |
+
|
| 158 |
+
```bash
|
| 159 |
+
p2t predict -l en,ch_sim --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg
|
| 160 |
+
```
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
## 针对不同语言
|
| 164 |
+
|
| 165 |
+
### 英文
|
| 166 |
+
|
| 167 |
+
**识别效果**:
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+
**识别命令**:
|
| 172 |
+
|
| 173 |
+
```bash
|
| 174 |
+
p2t predict -l en --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg
|
| 175 |
+
```
|
| 176 |
+
|
| 177 |
+
### 简体中文
|
| 178 |
+
|
| 179 |
+
**识别效果**:
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
|
| 183 |
+
**识别命令**:
|
| 184 |
+
|
| 185 |
+
```bash
|
| 186 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/mixed.jpg --save-debug-res out-debug-mixed.jpg
|
| 187 |
+
```
|
| 188 |
+
|
| 189 |
+
### 繁体中文
|
| 190 |
+
|
| 191 |
+
**识别效果**:
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
|
| 195 |
+
**识别命令**:
|
| 196 |
+
|
| 197 |
+
```bash
|
| 198 |
+
p2t predict -l en,ch_tra --mfd-config '{"model_name": "mfd-pro", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/ch_tra.jpg --save-debug-res out-debug-tra.jpg
|
| 199 |
+
```
|
| 200 |
+
|
| 201 |
+
> 注意 ⚠️ :请通过以下命令安装 pix2text 的多语言版本:
|
| 202 |
+
> ```bash
|
| 203 |
+
> pip install pix2text[multilingual]
|
| 204 |
+
> ```
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
### 越南语
|
| 208 |
+
**识别效果**:
|
| 209 |
+
|
| 210 |
+

|
| 211 |
+
|
| 212 |
+
**识别命令**:
|
| 213 |
+
|
| 214 |
+
```bash
|
| 215 |
+
p2t predict -l en,vi --mfd-config '{"model_name": "mfd-pro", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro","model_backend":"onnx"}' --resized-shape 608 --no-auto-line-break --file-type text_formula -i docs/examples/vietnamese.jpg --save-debug-res out-debug-vi.jpg
|
| 216 |
+
```
|
| 217 |
+
|
| 218 |
+
> 注意 ⚠️ :请通过以下命令安装 pix2text 的多语言版本:
|
| 219 |
+
> ```bash
|
| 220 |
+
> pip install pix2text[multilingual]
|
| 221 |
+
> ```
|
docs/examples/test-doc.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:746024d672224466f2fbcc46385afe71e186b3d6542ae4c7132f7fd9aac36ac7
|
| 3 |
+
size 1631522
|
docs/examples_en.md
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<figure markdown>
|
| 2 |
+
|
| 3 |
+
[中文](examples.md) | English
|
| 4 |
+
|
| 5 |
+
</figure>
|
| 6 |
+
|
| 7 |
+
# Examples
|
| 8 |
+
## Recognize PDF Files and Return Markdown Format
|
| 9 |
+
|
| 10 |
+
For PDF files, you can use the `.recognize_pdf()` function to recognize the entire file or specific pages and output the results as a Markdown file. For example, for the following PDF file ([examples/test-doc.pdf](examples/test-doc.pdf)),
|
| 11 |
+
you can call the function like this:
|
| 12 |
+
|
| 13 |
+
```python
|
| 14 |
+
from pix2text import Pix2Text
|
| 15 |
+
|
| 16 |
+
img_fp = './examples/test-doc.pdf'
|
| 17 |
+
p2t = Pix2Text.from_config()
|
| 18 |
+
doc = p2t.recognize_pdf(img_fp, page_numbers=[0, 1])
|
| 19 |
+
doc.to_markdown('output-md') # The exported Markdown information is saved in the output-md directory
|
| 20 |
+
```
|
| 21 |
+
|
| 22 |
+
You can also achieve the same functionality using the command line. Below is a command that uses the premium models (MFD + MFR + CnOCR) for recognition:
|
| 23 |
+
|
| 24 |
+
```bash
|
| 25 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --rec-kwargs '{"page_numbers": [0, 1]}' --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
The recognition result can be found in [output-md/output.md](output-md/output.md).
|
| 29 |
+
|
| 30 |
+
<br/>
|
| 31 |
+
|
| 32 |
+
> If you wish to export formats other than Markdown, such as Word, HTML, PDF, etc., it is recommended to use the tool [Pandoc](https://pandoc.org) to convert the Markdown result.
|
| 33 |
+
|
| 34 |
+
## Recognize Images with Complex Layout
|
| 35 |
+
|
| 36 |
+
You can use the `.recognize_page()` function to recognize text and mathematical formulas in images. For example, for the following image ([examples/page2.png](examples/page2.png)):
|
| 37 |
+
|
| 38 |
+
<figure markdown>
|
| 39 |
+
{: style="width:600px"}
|
| 40 |
+
</figure>
|
| 41 |
+
|
| 42 |
+
You can call the function like this:
|
| 43 |
+
|
| 44 |
+
```python
|
| 45 |
+
from pix2text import Pix2Text
|
| 46 |
+
|
| 47 |
+
img_fp = './examples/page2.png'
|
| 48 |
+
p2t = Pix2Text.from_config()
|
| 49 |
+
page = p2t.recognize_page(img_fp)
|
| 50 |
+
page.to_markdown('output-page') # The exported Markdown information is saved in the output-page directory
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
You can also achieve the same functionality using the command line. Below is a command that uses the premium models (MFD + MFR + CnOCR) for recognition:
|
| 54 |
+
|
| 55 |
+
```bash
|
| 56 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type page -i docs/examples/page2.png -o output-page --save-debug-res output-debug-page
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
The recognition result is similar to [output-md/output.md](output-md/output.md).
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
## Recognize Paragraph Images with Both Formulas and Texts
|
| 63 |
+
|
| 64 |
+
For paragraph images containing both formulas and texts, you don't need to use the layout analysis model. You can use the `.recognize_text_formula()` function to recognize both texts and mathematical formulas in the image. For example, for the following image ([examples/en1.jpg](examples/en1.jpg)):
|
| 65 |
+
|
| 66 |
+
<figure markdown>
|
| 67 |
+
{: style="width:600px"}
|
| 68 |
+
</figure>
|
| 69 |
+
|
| 70 |
+
You can call the function like this:
|
| 71 |
+
|
| 72 |
+
```python
|
| 73 |
+
from pix2text import Pix2Text, merge_line_texts
|
| 74 |
+
|
| 75 |
+
img_fp = './examples/en1.jpg'
|
| 76 |
+
p2t = Pix2Text.from_config()
|
| 77 |
+
outs = p2t.recognize_text_formula(img_fp, resized_shape=768, return_text=True)
|
| 78 |
+
print(outs)
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
The returned result `outs` is a dictionary, where the key `position` represents the box position information, `type` represents the category information, and `text` represents the recognition result. For detailed explanations, see [API Documentation](#api-documentation).
|
| 82 |
+
|
| 83 |
+
You can also achieve the same functionality using the command line. Below is a command that uses the premium models (MFD + MFR + CnOCR) for recognition:
|
| 84 |
+
|
| 85 |
+
```bash
|
| 86 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
Or use the free open-source models for recognition:
|
| 90 |
+
|
| 91 |
+
```bash
|
| 92 |
+
p2t predict -l en,ch_sim --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
## Recognize Pure Formula Images
|
| 96 |
+
|
| 97 |
+
For images containing only mathematical formulas, you can use the `.recognize_formula()` function to recognize the formulas as LaTeX expressions. For example, for the following image ([examples/math-formula-42.png](examples/math-formula-42.png)):
|
| 98 |
+
|
| 99 |
+
<figure markdown>
|
| 100 |
+
{: style="width:300px"}
|
| 101 |
+
</figure>
|
| 102 |
+
|
| 103 |
+
You can call the function like this:
|
| 104 |
+
|
| 105 |
+
```python
|
| 106 |
+
from pix2text import Pix2Text
|
| 107 |
+
|
| 108 |
+
img_fp = './examples/math-formula-42.png'
|
| 109 |
+
p2t = Pix2Text.from_config()
|
| 110 |
+
outs = p2t.recognize_formula(img_fp)
|
| 111 |
+
print(outs)
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
The returned result is a string representing the corresponding LaTeX expression. For detailed explanations, see [Usage](usage.md).
|
| 115 |
+
|
| 116 |
+
You can also achieve the same functionality using the command line. Below is a command that uses the premium model (MFR) for recognition:
|
| 117 |
+
|
| 118 |
+
```bash
|
| 119 |
+
p2t predict -l en,ch_sim --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --file-type formula -i docs/examples/math-formula-42.png
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
Or use the free open-source model for recognition:
|
| 123 |
+
|
| 124 |
+
```bash
|
| 125 |
+
p2t predict -l en,ch_sim --file-type formula -i docs/examples/math-formula-42.png
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
## Recognize Pure Text Images
|
| 129 |
+
|
| 130 |
+
For images containing only text without mathematical formulas, you can use the `.recognize_text()` function to recognize the text in the image. In this case, Pix2Text acts as a general text OCR engine. For example, for the following image ([examples/general.jpg](examples/general.jpg)):
|
| 131 |
+
|
| 132 |
+
<figure markdown>
|
| 133 |
+
{: style="width:400px"}
|
| 134 |
+
</figure>
|
| 135 |
+
|
| 136 |
+
You can call the function like this:
|
| 137 |
+
|
| 138 |
+
```python
|
| 139 |
+
from pix2text import Pix2Text
|
| 140 |
+
|
| 141 |
+
img_fp = './examples/general.jpg'
|
| 142 |
+
p2t = Pix2Text.from_config()
|
| 143 |
+
outs = p2t.recognize_text(img_fp)
|
| 144 |
+
print(outs)
|
| 145 |
+
```
|
| 146 |
+
|
| 147 |
+
The returned result is a string representing the corresponding text sequence. For detailed explanations, see [API Documentation](https://pix2text.readthedocs.io/zh-cn/latest/pix2text/pix_to_text/).
|
| 148 |
+
|
| 149 |
+
You can also achieve the same functionality using the command line. Below is a command that uses the premium model (CnOCR) for recognition:
|
| 150 |
+
|
| 151 |
+
```bash
|
| 152 |
+
p2t predict -l en,ch_sim --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
Or use the free open-source model for recognition:
|
| 156 |
+
|
| 157 |
+
```bash
|
| 158 |
+
p2t predict -l en,ch_sim --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
## For Different Languages
|
| 162 |
+
|
| 163 |
+
### English
|
| 164 |
+
|
| 165 |
+
**Recognition Result**:
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
|
| 169 |
+
**Recognition Command**:
|
| 170 |
+
|
| 171 |
+
```bash
|
| 172 |
+
p2t predict -l en --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg
|
| 173 |
+
```
|
| 174 |
+
|
| 175 |
+
### Simplified Chinese
|
| 176 |
+
|
| 177 |
+
**Recognition Result**:
|
| 178 |
+
|
| 179 |
+

|
| 180 |
+
|
| 181 |
+
**Recognition Command**:
|
| 182 |
+
|
| 183 |
+
```bash
|
| 184 |
+
p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/mixed.jpg --save-debug-res out-debug-mixed.jpg
|
| 185 |
+
```
|
| 186 |
+
|
| 187 |
+
### Traditional Chinese
|
| 188 |
+
|
| 189 |
+
**Recognition Result**:
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
|
| 193 |
+
**Recognition Command**:
|
| 194 |
+
|
| 195 |
+
```bash
|
| 196 |
+
p2t predict -l en,ch_tra --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/ch_tra.jpg --save-debug-res out-debug-tra.jpg
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
> Note ⚠️: Please install the multilingual version of pix2text using the following command:
|
| 200 |
+
> ```bash
|
| 201 |
+
> pip install pix2text[multilingual]
|
| 202 |
+
> ```
|
| 203 |
+
|
| 204 |
+
### Vietnamese
|
| 205 |
+
|
| 206 |
+
**Recognition Result**:
|
| 207 |
+
|
| 208 |
+

|
| 209 |
+
|
| 210 |
+
**Recognition Command**:
|
| 211 |
+
|
| 212 |
+
```bash
|
| 213 |
+
p2t predict -l en,vi --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --resized-shape 608 --no-auto-line-break --file-type text_formula -i docs/examples/vietnamese.jpg --save-debug-res out-debug-vi.jpg
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
> Note ⚠️: Please install the multilingual version of pix2text using the following command:
|
| 217 |
+
> ```bash
|
| 218 |
+
> pip install pix2text[multilingual]
|
| 219 |
+
> ```
|
docs/faq.md
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 常见问题(FAQ)
|
| 2 |
+
|
| 3 |
+
## Pix2Text 是免费的吗?
|
| 4 |
+
|
| 5 |
+
Pix2Text 代码和基础模型是免费的,而且是开源的。可以按需自行调整发布或商业使用。
|
| 6 |
+
|
| 7 |
+
但请注意,Pix2Text 的不同付费模型包含不同的 license,购买时请参考具体的 license 说明。
|
| 8 |
+
|
docs/figs/breezedeus.ico
ADDED
|
|
docs/index.md
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<figure markdown>
|
| 2 |
+
{: style="width:180px"}
|
| 3 |
+
</figure>
|
| 4 |
+
|
| 5 |
+
# Pix2Text (P2T)
|
| 6 |
+
[](https://discord.gg/GgD87WM8Tf)
|
| 7 |
+
[](https://pepy.tech/project/pix2text)
|
| 8 |
+
[](https://visitorbadge.io/status?path=https%3A%2F%2Fpix2text.readthedocs.io%2Fzh-cn%2Fstable%2F)
|
| 9 |
+
[](./LICENSE)
|
| 10 |
+
[](https://badge.fury.io/py/pix2text)
|
| 11 |
+
[](https://github.com/breezedeus/pix2text)
|
| 12 |
+
[](https://github.com/breezedeus/pix2text)
|
| 13 |
+

|
| 14 |
+

|
| 15 |
+
[](https://twitter.com/breezedeus)
|
| 16 |
+
|
| 17 |
+
<figure markdown>
|
| 18 |
+
[📖 使用](usage.md) |
|
| 19 |
+
[🛠️ 安装](install.md) |
|
| 20 |
+
[🧳 模型](models.md) |
|
| 21 |
+
[🛀🏻 在线Demo](demo.md) |
|
| 22 |
+
[💬 交流群](contact.md)
|
| 23 |
+
|
| 24 |
+
[English](index_en.md) | 中文
|
| 25 |
+
</figure>
|
| 26 |
+
|
| 27 |
+
**Pix2Text (P2T)** 期望成为 **[Mathpix](https://mathpix.com/)** 的**免费开源 Python** 替代工具,目前已经可以完成 **Mathpix** 的核心功能。
|
| 28 |
+
**Pix2Text (P2T) 可以识别图片中的版面、表格、图片、文字、数学公式等内容,并整合所有内容后以 Markdown 格式输出。P2T 也可以把一整个 PDF 文件(PDF 的内容可以是扫描图片或者其他任何格式)转换为 Markdown 格式。**
|
| 29 |
+
|
| 30 |
+
**Pix2Text (P2T)** 整合了以下模型:
|
| 31 |
+
|
| 32 |
+
- **版面分析模型**:[breezedeus/pix2text-layout-docyolo](https://huggingface.co/breezedeus/pix2text-layout-docyolo) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout-docyolo))。
|
| 33 |
+
- **表格识别模型**:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
|
| 34 |
+
- **文字识别引擎**:支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
|
| 35 |
+
- **数学公式检测模型(MFD)**:[breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5))。基于 [CnSTD](https://github.com/breezedeus/cnstd) 实现。
|
| 36 |
+
- **数学公式识别模型(MFR)**:[breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5))。
|
| 37 |
+
|
| 38 |
+
其中多个模型来自其他开源作者, 非常感谢他们的贡献。
|
| 39 |
+
|
| 40 |
+
<div align="center">
|
| 41 |
+
<img src="figs/arch-flow.jpg" alt="Pix2Text Arch Flow"/>
|
| 42 |
+
</div>
|
| 43 |
+
|
| 44 |
+
具体说明请参考 [可用模型](models.md)。
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
P2T 作为Python3工具包,对于不熟悉Python的朋友不太友好,所以我们也发布了**可免费使用**的 **[P2T网页版](https://p2t.breezedeus.com)**,直接把图片丢进网页就能输出P2T的解析结果。**网页版会使用最新的模型,效果会比开源模型更好。**
|
| 48 |
+
|
| 49 |
+
感兴趣的朋友欢迎扫码加小助手为好友,备注 `p2t`,小助手会定期统一邀请大家入群。群内会发布P2T相关工具的最新进展:
|
| 50 |
+
|
| 51 |
+
<div align="center">
|
| 52 |
+
<img src="figs/wx-qr-code.JPG" alt="微信群二维码" width="300px"/>
|
| 53 |
+
</div>
|
| 54 |
+
|
| 55 |
+
作者也维护 **知识星球** [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) ,这里面的提问会较快得到作者的回复,欢迎加入。**知识星球私享群**也会陆续发布一些P2T/CnOCR/CnSTD相关的私有资料,包括**部分未公开的模型**,**购买付费模型享优惠**,**不同应用场景的调用代码**,使用过程中遇到的难题解答等。星球也会发布P2T/OCR/STD相关的最新研究资料。
|
| 56 |
+
|
| 57 |
+
更多说明可见 [交流群](contact.md)。
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
## 支持的语言列表
|
| 61 |
+
|
| 62 |
+
Pix2Text 的文字识别引擎支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 **[CnOCR](https://github.com/breezedeus/cnocr)** ,其他语言的识别使用的是开源 OCR 工具 **[EasyOCR](https://github.com/JaidedAI/EasyOCR)** ,感谢相关的作者们。
|
| 63 |
+
|
| 64 |
+
支持的**语言列表**和**语言代码**如下:
|
| 65 |
+
<details>
|
| 66 |
+
<summary>↓↓↓ Click to show details ↓↓↓</summary>
|
| 67 |
+
|
| 68 |
+
| Language | Code Name |
|
| 69 |
+
| ------------------- | ----------- |
|
| 70 |
+
| Abaza | abq |
|
| 71 |
+
| Adyghe | ady |
|
| 72 |
+
| Afrikaans | af |
|
| 73 |
+
| Angika | ang |
|
| 74 |
+
| Arabic | ar |
|
| 75 |
+
| Assamese | as |
|
| 76 |
+
| Avar | ava |
|
| 77 |
+
| Azerbaijani | az |
|
| 78 |
+
| Belarusian | be |
|
| 79 |
+
| Bulgarian | bg |
|
| 80 |
+
| Bihari | bh |
|
| 81 |
+
| Bhojpuri | bho |
|
| 82 |
+
| Bengali | bn |
|
| 83 |
+
| Bosnian | bs |
|
| 84 |
+
| Simplified Chinese | ch_sim |
|
| 85 |
+
| Traditional Chinese | ch_tra |
|
| 86 |
+
| Chechen | che |
|
| 87 |
+
| Czech | cs |
|
| 88 |
+
| Welsh | cy |
|
| 89 |
+
| Danish | da |
|
| 90 |
+
| Dargwa | dar |
|
| 91 |
+
| German | de |
|
| 92 |
+
| English | en |
|
| 93 |
+
| Spanish | es |
|
| 94 |
+
| Estonian | et |
|
| 95 |
+
| Persian (Farsi) | fa |
|
| 96 |
+
| French | fr |
|
| 97 |
+
| Irish | ga |
|
| 98 |
+
| Goan Konkani | gom |
|
| 99 |
+
| Hindi | hi |
|
| 100 |
+
| Croatian | hr |
|
| 101 |
+
| Hungarian | hu |
|
| 102 |
+
| Indonesian | id |
|
| 103 |
+
| Ingush | inh |
|
| 104 |
+
| Icelandic | is |
|
| 105 |
+
| Italian | it |
|
| 106 |
+
| Japanese | ja |
|
| 107 |
+
| Kabardian | kbd |
|
| 108 |
+
| Kannada | kn |
|
| 109 |
+
| Korean | ko |
|
| 110 |
+
| Kurdish | ku |
|
| 111 |
+
| Latin | la |
|
| 112 |
+
| Lak | lbe |
|
| 113 |
+
| Lezghian | lez |
|
| 114 |
+
| Lithuanian | lt |
|
| 115 |
+
| Latvian | lv |
|
| 116 |
+
| Magahi | mah |
|
| 117 |
+
| Maithili | mai |
|
| 118 |
+
| Maori | mi |
|
| 119 |
+
| Mongolian | mn |
|
| 120 |
+
| Marathi | mr |
|
| 121 |
+
| Malay | ms |
|
| 122 |
+
| Maltese | mt |
|
| 123 |
+
| Nepali | ne |
|
| 124 |
+
| Newari | new |
|
| 125 |
+
| Dutch | nl |
|
| 126 |
+
| Norwegian | no |
|
| 127 |
+
| Occitan | oc |
|
| 128 |
+
| Pali | pi |
|
| 129 |
+
| Polish | pl |
|
| 130 |
+
| Portuguese | pt |
|
| 131 |
+
| Romanian | ro |
|
| 132 |
+
| Russian | ru |
|
| 133 |
+
| Serbian (cyrillic) | rs_cyrillic |
|
| 134 |
+
| Serbian (latin) | rs_latin |
|
| 135 |
+
| Nagpuri | sck |
|
| 136 |
+
| Slovak | sk |
|
| 137 |
+
| Slovenian | sl |
|
| 138 |
+
| Albanian | sq |
|
| 139 |
+
| Swedish | sv |
|
| 140 |
+
| Swahili | sw |
|
| 141 |
+
| Tamil | ta |
|
| 142 |
+
| Tabassaran | tab |
|
| 143 |
+
| Telugu | te |
|
| 144 |
+
| Thai | th |
|
| 145 |
+
| Tajik | tjk |
|
| 146 |
+
| Tagalog | tl |
|
| 147 |
+
| Turkish | tr |
|
| 148 |
+
| Uyghur | ug |
|
| 149 |
+
| Ukrainian           | uk          |
|
| 150 |
+
| Urdu | ur |
|
| 151 |
+
| Uzbek | uz |
|
| 152 |
+
| Vietnamese | vi |
|
| 153 |
+
|
| 154 |
+
> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) .
|
| 155 |
+
|
| 156 |
+
</details>
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
## P2T 网页版
|
| 161 |
+
|
| 162 |
+
所有人都可以免费使用 **[P2T网页版](https://p2t.breezedeus.com)**,每人每天可以免费识别 10000 个字符,正常使用应该够用了。*请不要批量调用接口,机器资源有限,批量调用会导致其他人无法使用服务。*
|
| 163 |
+
|
| 164 |
+
受限于机器资源,网页版当前只支持**简体中文和英文**,要尝试其他语言上的效果,请使用以下的**在线 Demo**。
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
## 在线 Demo 🤗
|
| 169 |
+
|
| 170 |
+
也可以使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内镜像](https://hf.qhduan.com/spaces/breezedeus/Pix2Text-Demo)) 尝试 **P2T** 在不同语言上的效果。但在线 Demo 使用的硬件配置较低,速度会较慢。如果是简体中文或者英文图片,建议使用 **[P2T网页版](https://p2t.breezedeus.com)**。
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
## 安装
|
| 174 |
+
|
| 175 |
+
嗯,顺利的话一行命令即可。
|
| 176 |
+
|
| 177 |
+
```bash
|
| 178 |
+
pip install pix2text
|
| 179 |
+
```
|
| 180 |
+
|
| 181 |
+
如果需要识别**英文**与**简体中文**之外的文字,请使用以下命令安装额外的包:
|
| 182 |
+
|
| 183 |
+
```bash
|
| 184 |
+
pip install pix2text[multilingual]
|
| 185 |
+
```
|
| 186 |
+
|
| 187 |
+
安装速度慢的话,可以指定国内的安装源,如使用阿里云的安装源:
|
| 188 |
+
|
| 189 |
+
```bash
|
| 190 |
+
pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
如果是初次使用**OpenCV**,那估计安装都不会很顺利,bless。
|
| 194 |
+
|
| 195 |
+
**Pix2Text** 主要依赖 [**CnSTD>=1.2.4**](https://github.com/breezedeus/cnstd)、[**CnOCR>=2.3**](https://github.com/breezedeus/cnocr) ,以及 [**transformers>=4.37.0**](https://github.com/huggingface/transformers) 。如果安装过程遇到问题,也可参考它们的安装说明文档。
|
| 196 |
+
|
| 197 |
+
> **Warning**
|
| 198 |
+
>
|
| 199 |
+
> 如果电脑中从未安装过 `PyTorch`,`OpenCV` python包,初次安装可能会遇到不少问题,但一般都是常见问题,可以自行百度/Google解决。
|
| 200 |
+
|
| 201 |
+
更多说明参考 [安装说明](install.md) 。
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
## 使用说明
|
| 205 |
+
|
| 206 |
+
参见:[使用说明](usage.md)。
|
| 207 |
+
|
| 208 |
+
## 示例
|
| 209 |
+
|
| 210 |
+
参见:[示例](examples.md)。
|
| 211 |
+
|
| 212 |
+
## 模型下载
|
| 213 |
+
|
| 214 |
+
参见:[模型](models.md)。
|
| 215 |
+
|
| 216 |
+
## 命令行工具
|
| 217 |
+
|
| 218 |
+
参见:[命令行工具](command.md)。
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
## HTTP 服务
|
| 222 |
+
|
| 223 |
+
使用命令 **`p2t serve`** 开启一个 HTTP 服务,用于接收图片(当前不支持 PDF)并返回识别结果。
|
| 224 |
+
|
| 225 |
+
```bash
|
| 226 |
+
p2t serve -l en,ch_sim -H 0.0.0.0 -p 8503
|
| 227 |
+
```
|
| 228 |
+
|
| 229 |
+
之后可以使用 curl 调用服务:
|
| 230 |
+
|
| 231 |
+
```bash
|
| 232 |
+
curl -X POST \
|
| 233 |
+
-F "file_type=page" \
|
| 234 |
+
-F "resized_shape=768" \
|
| 235 |
+
-F "embed_sep= $,$ " \
|
| 236 |
+
-F "isolated_sep=$$\n, \n$$" \
|
| 237 |
+
-F "image=@docs/examples/page2.png;type=image/jpeg" \
|
| 238 |
+
http://0.0.0.0:8503/pix2text
|
| 239 |
+
```
|
| 240 |
+
|
| 241 |
+
更多说明参考 [命令说明/开启服务](command.md) 。
|
| 242 |
+
|
| 243 |
+
## Mac 桌面客户端
|
| 244 |
+
|
| 245 |
+
请参考 [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) 安装 Pix2Text 的 MacOS 桌面客户端。
|
| 246 |
+
|
| 247 |
+
<div align="center">
|
| 248 |
+
<img src="https://github.com/breezedeus/Pix2Text-Mac/raw/main/assets/on_menu_bar.jpg" alt="Pix2Text Mac 客户端" width="400px"/>
|
| 249 |
+
</div>
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
## 给作者来杯咖啡
|
| 253 |
+
|
| 254 |
+
开源不易,如果此项目对您有帮助,可以考虑 [给作者加点油🥤,鼓鼓气💪🏻](buymeacoffee.md) 。
|
| 255 |
+
|
| 256 |
+
---
|
| 257 |
+
|
| 258 |
+
官方代码库:
|
| 259 |
+
|
| 260 |
+
* **Github**: [https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text) 。
|
| 261 |
+
* **Gitee**: [https://gitee.com/breezedeus/pix2text](https://gitee.com/breezedeus/pix2text) 。
|
| 262 |
+
|
| 263 |
+
Pix2Text (P2T) 更多信息:[https://www.breezedeus.com/article/pix2text_cn](https://www.breezedeus.com/article/pix2text_cn) 。
|
docs/index_en.md
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<figure markdown>
|
| 2 |
+
{: style="width:180px"}
|
| 3 |
+
</figure>
|
| 4 |
+
|
| 5 |
+
# Pix2Text (P2T)
|
| 6 |
+
[](https://discord.gg/GgD87WM8Tf)
|
| 7 |
+
[](https://pepy.tech/project/pix2text)
|
| 8 |
+
[](https://visitorbadge.io/status?path=https%3A%2F%2Fpix2text.readthedocs.io%2Fzh-cn%2Fstable%2F)
|
| 9 |
+
[](./LICENSE)
|
| 10 |
+
[](https://badge.fury.io/py/pix2text)
|
| 11 |
+
[](https://github.com/breezedeus/pix2text)
|
| 12 |
+
[](https://github.com/breezedeus/pix2text)
|
| 13 |
+

|
| 14 |
+

|
| 15 |
+
[](https://twitter.com/breezedeus)
|
| 16 |
+
|
| 17 |
+
<figure markdown>
|
| 18 |
+
[📖 Usage](usage.md) |
|
| 19 |
+
[🛠️ Install](install.md) |
|
| 20 |
+
[🧳 Models](models.md) |
|
| 21 |
+
[🛀🏻 Demo](demo.md) |
|
| 22 |
+
[💬 Contact](contact.md)
|
| 23 |
+
|
| 24 |
+
[中文](index.md) | English
|
| 25 |
+
</figure>
|
| 26 |
+
|
| 27 |
+
**Pix2Text (P2T)** aims to be a **free and open-source Python** alternative to **[Mathpix](https://mathpix.com/)**, and it can already accomplish **Mathpix**'s core functionality. **Pix2Text (P2T) can recognize layouts, tables, images, text, mathematical formulas, and integrate all of these contents into Markdown format. P2T can also convert an entire PDF file (which can contain scanned images or any other format) into Markdown format.**
|
| 28 |
+
|
| 29 |
+
**Pix2Text (P2T)** integrates the following models:
|
| 30 |
+
|
| 31 |
+
- **Layout Analysis Model**: [breezedeus/pix2text-layout-docyolo](https://huggingface.co/breezedeus/pix2text-layout-docyolo) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-layout-docyolo)).
|
| 32 |
+
- **Table Recognition Model**: [breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-table-rec)).
|
| 33 |
+
- **Text Recognition Engine**: Supports **80+ languages** such as **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. For English and Simplified Chinese recognition, it uses the open-source OCR tool [CnOCR](https://github.com/breezedeus/cnocr), while for other languages, it uses the open-source OCR tool [EasyOCR](https://github.com/JaidedAI/EasyOCR).
|
| 34 |
+
- **Mathematical Formula Detection Model (MFD)**: [breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5)). Implemented based on [CnSTD](https://github.com/breezedeus/cnstd).
|
| 35 |
+
- **Mathematical Formula Recognition Model (MFR)**: [breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5)).
|
| 36 |
+
|
| 37 |
+
Several models are contributed by other open-source authors, and their contributions are highly appreciated.
|
| 38 |
+
|
| 39 |
+
<figure markdown>
|
| 40 |
+

|
| 41 |
+
</figure>
|
| 42 |
+
|
| 43 |
+
For detailed explanations, please refer to the [Models](models.md).
|
| 44 |
+
|
| 45 |
+
As a Python3 toolkit, P2T may not be very user-friendly for those who are not familiar with Python. Therefore, we also provide a **[free-to-use P2T Online Web](https://p2t.breezedeus.com)**, where you can directly upload images and get P2T parsing results. The web version uses the latest models, resulting in better performance compared to the open-source models.
|
| 46 |
+
|
| 47 |
+
Welcome to join [**Pix2Text Discord Server**](https://discord.gg/GgD87WM8Tf), if you have any questions or suggestions.
|
| 48 |
+
|
| 49 |
+
If you're interested, feel free to add the WeChat assistant as a friend by scanning the QR code and mentioning `p2t`. The assistant will regularly invite everyone to join the group where the latest developments related to P2T tools will be announced:
|
| 50 |
+
|
| 51 |
+
<figure markdown>
|
| 52 |
+
{: style="width:300px"}
|
| 53 |
+
</figure>
|
| 54 |
+
|
| 55 |
+
The author also maintains a **Knowledge Planet** [**P2T/CnOCR/CnSTD Private Group**](https://t.zsxq.com/FEYZRJQ), where questions are answered promptly. You're welcome to join. The **knowledge planet private group** will also gradually release some private materials related to P2T/CnOCR/CnSTD, including **some unreleased models**, **discounts on purchasing premium models**, **code snippets for different application scenarios**, and answers to difficult problems encountered during use. The planet will also publish the latest research materials related to P2T/OCR/STD.
|
| 56 |
+
|
| 57 |
+
For more contact methods, please refer to [Contact](contact.md).
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
## List of Supported Languages
|
| 61 |
+
|
| 62 |
+
The text recognition engine of Pix2Text supports **`80+` languages**, including **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. Among these, **English** and **Simplified Chinese** recognition utilize the open-source OCR tool **[CnOCR](https://github.com/breezedeus/cnocr)**, while recognition for other languages employs the open-source OCR tool **[EasyOCR](https://github.com/JaidedAI/EasyOCR)**. Special thanks to the respective authors.
|
| 63 |
+
|
| 64 |
+
List of **Supported Languages** and **Language Codes** are shown below:
|
| 65 |
+
|
| 66 |
+
<details>
|
| 67 |
+
<summary>↓↓↓ Click to show details ↓↓↓</summary>
|
| 68 |
+
|
| 69 |
+
| Language | Code Name |
|
| 70 |
+
| ------------------- | ----------- |
|
| 71 |
+
| Abaza | abq |
|
| 72 |
+
| Adyghe | ady |
|
| 73 |
+
| Afrikaans | af |
|
| 74 |
+
| Angika | ang |
|
| 75 |
+
| Arabic | ar |
|
| 76 |
+
| Assamese | as |
|
| 77 |
+
| Avar | ava |
|
| 78 |
+
| Azerbaijani | az |
|
| 79 |
+
| Belarusian | be |
|
| 80 |
+
| Bulgarian | bg |
|
| 81 |
+
| Bihari | bh |
|
| 82 |
+
| Bhojpuri | bho |
|
| 83 |
+
| Bengali | bn |
|
| 84 |
+
| Bosnian | bs |
|
| 85 |
+
| Simplified Chinese | ch_sim |
|
| 86 |
+
| Traditional Chinese | ch_tra |
|
| 87 |
+
| Chechen | che |
|
| 88 |
+
| Czech | cs |
|
| 89 |
+
| Welsh | cy |
|
| 90 |
+
| Danish | da |
|
| 91 |
+
| Dargwa | dar |
|
| 92 |
+
| German | de |
|
| 93 |
+
| English | en |
|
| 94 |
+
| Spanish | es |
|
| 95 |
+
| Estonian | et |
|
| 96 |
+
| Persian (Farsi) | fa |
|
| 97 |
+
| French | fr |
|
| 98 |
+
| Irish | ga |
|
| 99 |
+
| Goan Konkani | gom |
|
| 100 |
+
| Hindi | hi |
|
| 101 |
+
| Croatian | hr |
|
| 102 |
+
| Hungarian | hu |
|
| 103 |
+
| Indonesian | id |
|
| 104 |
+
| Ingush | inh |
|
| 105 |
+
| Icelandic | is |
|
| 106 |
+
| Italian | it |
|
| 107 |
+
| Japanese | ja |
|
| 108 |
+
| Kabardian | kbd |
|
| 109 |
+
| Kannada | kn |
|
| 110 |
+
| Korean | ko |
|
| 111 |
+
| Kurdish | ku |
|
| 112 |
+
| Latin | la |
|
| 113 |
+
| Lak | lbe |
|
| 114 |
+
| Lezghian | lez |
|
| 115 |
+
| Lithuanian | lt |
|
| 116 |
+
| Latvian | lv |
|
| 117 |
+
| Magahi | mah |
|
| 118 |
+
| Maithili | mai |
|
| 119 |
+
| Maori | mi |
|
| 120 |
+
| Mongolian | mn |
|
| 121 |
+
| Marathi | mr |
|
| 122 |
+
| Malay | ms |
|
| 123 |
+
| Maltese | mt |
|
| 124 |
+
| Nepali | ne |
|
| 125 |
+
| Newari | new |
|
| 126 |
+
| Dutch | nl |
|
| 127 |
+
| Norwegian | no |
|
| 128 |
+
| Occitan | oc |
|
| 129 |
+
| Pali | pi |
|
| 130 |
+
| Polish | pl |
|
| 131 |
+
| Portuguese | pt |
|
| 132 |
+
| Romanian | ro |
|
| 133 |
+
| Russian | ru |
|
| 134 |
+
| Serbian (cyrillic) | rs_cyrillic |
|
| 135 |
+
| Serbian (latin) | rs_latin |
|
| 136 |
+
| Nagpuri | sck |
|
| 137 |
+
| Slovak | sk |
|
| 138 |
+
| Slovenian | sl |
|
| 139 |
+
| Albanian | sq |
|
| 140 |
+
| Swedish | sv |
|
| 141 |
+
| Swahili | sw |
|
| 142 |
+
| Tamil | ta |
|
| 143 |
+
| Tabassaran | tab |
|
| 144 |
+
| Telugu | te |
|
| 145 |
+
| Thai | th |
|
| 146 |
+
| Tajik | tjk |
|
| 147 |
+
| Tagalog | tl |
|
| 148 |
+
| Turkish | tr |
|
| 149 |
+
| Uyghur | ug |
|
| 150 |
+
| Ukrainian           | uk          |
|
| 151 |
+
| Urdu | ur |
|
| 152 |
+
| Uzbek | uz |
|
| 153 |
+
| Vietnamese | vi |
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) .
|
| 157 |
+
|
| 158 |
+
</details>
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
## Online Service
|
| 162 |
+
|
| 163 |
+
Everyone can use the **[P2T Online Service](https://p2t.breezedeus.com)** for free, with a daily limit of 10,000 characters per account, which should be sufficient for normal use. *Please refrain from bulk API calls, as machine resources are limited, and this could prevent others from accessing the service.*
|
| 164 |
+
|
| 165 |
+
Due to hardware constraints, the Online Service currently only supports **Simplified Chinese** and **English** languages. To try the models in other languages, please use the following **Online Demo**.
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
## Online Demo 🤗
|
| 170 |
+
|
| 171 |
+
You can also try the **[Online Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)** ([Mirror](https://hf-mirror.com/spaces/breezedeus/Pix2Text-Demo)) to see the performance of **P2T** in various languages. However, the online demo operates on lower hardware specifications and may be slower. For Simplified Chinese or English images, it is recommended to use the **[P2T Online Service](https://p2t.breezedeus.com)**.
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
## Install
|
| 175 |
+
|
| 176 |
+
Well, if all goes well, a single command is enough.
|
| 177 |
+
|
| 178 |
+
```bash
|
| 179 |
+
pip install pix2text
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
If you need to recognize languages other than **English** and **Simplified Chinese**, please use the following command to install additional packages:
|
| 183 |
+
|
| 184 |
+
```bash
|
| 185 |
+
pip install pix2text[multilingual]
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
If the installation is slow, you can specify a domestic installation source, such as using the Aliyun source:
|
| 191 |
+
|
| 192 |
+
```bash
|
| 193 |
+
pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
|
| 194 |
+
```
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
If this is your first time installing **OpenCV**, the process may not go smoothly. Good luck.
|
| 198 |
+
|
| 199 |
+
**Pix2Text** mainly depends on [**CnSTD>=1.2.1**](https://github.com/breezedeus/cnstd), [**CnOCR>=2.2.2.1**](https://github.com/breezedeus/cnocr), and [**transformers>=4.37.0**](https://github.com/huggingface/transformers). If you encounter problems with the installation, you can also refer to their installation instruction documentations.
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
> **Warning**
|
| 203 |
+
>
|
| 204 |
+
> If you have never installed the `PyTorch`, `OpenCV` python packages before, you may encounter a lot of problems during the first installation, but they are usually common problems that can be solved by Baidu/Google.
|
| 205 |
+
|
| 206 |
+
For more instructions, please refer to [Install](install.md) .
|
| 207 |
+
|
| 208 |
+
## Usage
|
| 209 |
+
|
| 210 |
+
Refer to: [Usage](usage.md).
|
| 211 |
+
|
| 212 |
+
## Examples
|
| 213 |
+
|
| 214 |
+
Refer to: [Examples](examples.md).
|
| 215 |
+
|
| 216 |
+
## Model Downloads
|
| 217 |
+
|
| 218 |
+
Refer to: [Models](models.md).
|
| 219 |
+
|
| 220 |
+
## Command Line Tools
|
| 221 |
+
|
| 222 |
+
Refer to: [Command Line Tools](command.md).
|
| 223 |
+
|
| 224 |
+
## HTTP Service
|
| 225 |
+
|
| 226 |
+
To start an HTTP service for receiving images (currently does not support PDF) and returning recognition results, use the command **`p2t serve`**.
|
| 227 |
+
|
| 228 |
+
```bash
|
| 229 |
+
p2t serve -l en,ch_sim -H 0.0.0.0 -p 8503
|
| 230 |
+
```
|
| 231 |
+
|
| 232 |
+
Afterwards, you can call the service using curl:
|
| 233 |
+
|
| 234 |
+
```bash
|
| 235 |
+
curl -X POST \
|
| 236 |
+
-F "file_type=page" \
|
| 237 |
+
-F "resized_shape=768" \
|
| 238 |
+
-F "embed_sep= $,$ " \
|
| 239 |
+
-F "isolated_sep=$$\n, \n$$" \
|
| 240 |
+
-F "image=@docs/examples/page2.png;type=image/jpeg" \
|
| 241 |
+
http://0.0.0.0:8503/pix2text
|
| 242 |
+
```
|
| 243 |
+
|
| 244 |
+
For more information, refer to [Command/Starting the Service](command.md).
|
| 245 |
+
|
| 246 |
+
## MacOS Desktop Application
|
| 247 |
+
|
| 248 |
+
Please refer to [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) for installing the Pix2Text Desktop App for MacOS.
|
| 249 |
+
|
| 250 |
+
<div align="center">
|
| 251 |
+
<img src="https://github.com/breezedeus/Pix2Text-Mac/raw/main/assets/on_menu_bar.jpg" alt="Pix2Text Mac App" width="400px"/>
|
| 252 |
+
</div>
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
## A cup of coffee for the author
|
| 256 |
+
|
| 257 |
+
It is not easy to maintain and evolve the project, so if it is helpful to you, please consider [offering the author a cup of coffee 🥤](https://www.breezedeus.com/buy-me-coffee).
|
| 258 |
+
|
| 259 |
+
---
|
| 260 |
+
|
| 261 |
+
Official code base: [https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text). Please cite it properly.
|
| 262 |
+
|
| 263 |
+
For more information on Pix2Text (P2T), visit: [https://www.breezedeus.com/article/pix2text](https://www.breezedeus.com/article/pix2text).
|
docs/install.md
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 安装
|
| 2 |
+
|
| 3 |
+
## pip 安装
|
| 4 |
+
|
| 5 |
+
嗯,顺利的话一行命令即可。
|
| 6 |
+
|
| 7 |
+
```bash
|
| 8 |
+
pip install pix2text
|
| 9 |
+
```
|
| 10 |
+
|
| 11 |
+
### 其他语言支持
|
| 12 |
+
如果需要识别**英文**与**简体中文**之外的文字,请使用以下命令安装额外的包:
|
| 13 |
+
|
| 14 |
+
```bash
|
| 15 |
+
pip install pix2text[multilingual]
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
### 使用 LLM/VLM API 接口
|
| 19 |
+
|
| 20 |
+
如果需要使用 **LLM/VLM** API 接口,请使用以下命令安装额外的包:
|
| 21 |
+
|
| 22 |
+
```bash
|
| 23 |
+
pip install pix2text[vlm]
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
### 国内安装源
|
| 27 |
+
安装速度慢的话,可以指定国内的安装源,如使用阿里云的安装源:
|
| 28 |
+
|
| 29 |
+
```bash
|
| 30 |
+
pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
如果是初次使用**OpenCV**,那估计安装都不会很顺利,bless。
|
| 34 |
+
|
| 35 |
+
**Pix2Text** 主要依赖 [**CnSTD>=1.2.1**](https://github.com/breezedeus/cnstd)、[**CnOCR>=2.2.2.1**](https://github.com/breezedeus/cnocr) ,以及 [**transformers>=4.37.0**](https://github.com/huggingface/transformers) 。如果安装过程遇到问题,也可参考它们的安装说明文档。
|
| 36 |
+
|
| 37 |
+
> **Warning**
|
| 38 |
+
>
|
| 39 |
+
> 如果电脑中从未安装过 `PyTorch`,`OpenCV` python包,初次安装可能会遇到不少问题,但一般都是常见问题,可以自行百度/Google解决。
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
## GPU 环境使用 ONNX 模型
|
| 43 |
+
|
| 44 |
+
默认情况下安装的 **ONNX** 包是 **`onnxruntime`**,它只能在 `CPU` 上运行。如果需要在 `GPU` 环境使用 **ONNX** 模型,需要卸载此包,然后安装包 **`onnxruntime-gpu`** 。
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
pip uninstall onnxruntime
|
| 48 |
+
pip install onnxruntime-gpu
|
| 49 |
+
```
|
docs/models.md
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 各种模型
|
| 2 |
+
|
| 3 |
+
**Pix2Text (P2T)** 整合了很多不同功能的模型,主要包括:
|
| 4 |
+
|
| 5 |
+
- **版面分析模型**:[breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout))。
|
| 6 |
+
- **表格识别模型**:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
|
| 7 |
+
- **文字识别引擎**:支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
|
| 8 |
+
- **数学公式检测模型(MFD)**:[breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5))。基于 [CnSTD](https://github.com/breezedeus/cnstd) 实现。
|
| 9 |
+
- **数学公式识别模型(MFR)**:[breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5))。
|
| 10 |
+
|
| 11 |
+
其中多个模型来自其他开源作者, 非常感谢他们的贡献。
|
| 12 |
+
|
| 13 |
+
这些模型正常情况下都会自动下载(可能会比较慢,只要不报错请勿手动打断下载过程),但如果下载失败,可以参考以下的说明手动下载。
|
| 14 |
+
|
| 15 |
+
除基础模型外,Pix2Text 还提供了以下模型的高级付费版:
|
| 16 |
+
|
| 17 |
+
- MFD 和 MFR 付费模型:具体参考 [P2T详细资料 | Breezedeus.com](https://www.breezedeus.com/article/pix2text_cn)。
|
| 18 |
+
- CnOCR 付费模型:具体参考 [CnOCR详细资料 | Breezedeus.com](https://www.breezedeus.com/article/cnocr)。
|
| 19 |
+
|
| 20 |
+
具体说明请见本页面末尾。
|
| 21 |
+
|
| 22 |
+
下面的说明主要针对免费的基础模型。
|
| 23 |
+
|
| 24 |
+
## 版面分析模型
|
| 25 |
+
**版面分析模型** 下载地址:[breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout))。
|
| 26 |
+
把这里面的所有文件都下载到 `~/.pix2text/1.1/layout-parser` (Windows 系统放在 `C:\Users\<username>\AppData\Roaming\pix2text\1.1\layout-parser`)目录下即可,目录不存在的话请自己创建。
|
| 27 |
+
|
| 28 |
+
> 注:上面路径的 `1.1` 是 pix2text 的版本号,`1.1.*` 都对应 `1.1`。如果是其他版本请自行替换。
|
| 29 |
+
|
| 30 |
+
## 表格识别模型
|
| 31 |
+
**表格识别模型** 下载地址:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
|
| 32 |
+
把这里面的所有文件都下载到 `~/.pix2text/1.1/table-rec` (Windows 系统放在 `C:\Users\<username>\AppData\Roaming\pix2text\1.1\table-rec`)目录下即可,目录不存在的话请自己创建。
|
| 33 |
+
|
| 34 |
+
> 注:上面路径的 `1.1` 是 pix2text 的版本号,`1.1.*` 都对应 `1.1`。如果是其他版本请自行替换。
|
| 35 |
+
|
| 36 |
+
## 数学公式检测模型(MFD)
|
| 37 |
+
### `pix2text >= 1.1.1`
|
| 38 |
+
Pix2Text 自 **V1.1.1** 开始,**数学公式检测模型** 下载地址:[breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd))。
|
| 39 |
+
|
| 40 |
+
### `pix2text < 1.1.1`
|
| 41 |
+
**数学公式检测模型**(MFD)来自 [CnSTD](https://github.com/breezedeus/cnstd) 的数学公式检测模型(MFD),请参考其代码库说明。
|
| 42 |
+
|
| 43 |
+
如果系统无法自动成功下载模型文件,则需要手动从 [**cnstd-cnocr-models**](https://huggingface.co/breezedeus/cnstd-cnocr-models) ([国内镜像](https://hf-mirror.com/breezedeus/cnstd-cnocr-models))项目中下载,或者从[百度云盘](https://pan.baidu.com/s/1zDMzArCDrrXHWL0AWxwYQQ?pwd=nstd)(提取码为 `nstd`)下载对应的zip文件并把它存放于 `~/.cnstd/1.2`(Windows下为 `C:\Users\<username>\AppData\Roaming\cnstd\1.2`)目录中。
|
| 44 |
+
|
| 45 |
+
## 数学公式识别模型(MFR)
|
| 46 |
+
**数学公式识别模型** 下载地址:[breezedeus/pix2text-mfr](https://huggingface.co/breezedeus/pix2text-mfr) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr))。
|
| 47 |
+
把这里面的所有文件都下载到 `~/.pix2text/1.1/mfr-1.5-onnx` (Windows 系统放在 `C:\Users\<username>\AppData\Roaming\pix2text\1.1\mfr-1.5-onnx`)目录下即可,目录不存在的话请自己创建。
|
| 48 |
+
|
| 49 |
+
> 注:上面路径的 `1.1` 是 pix2text 的版本号,`1.1.*` 都对应 `1.1`。如果是其他版本请自行替换。
|
| 50 |
+
|
| 51 |
+
## 文字识别引擎
|
| 52 |
+
Pix2Text 的**文字识别引擎**可以识别 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
|
| 53 |
+
|
| 54 |
+
正常情况下,CnOCR 的模型都会自动下载。如果无法自动下载,可以参考以下说明手动下载。
|
| 55 |
+
CnOCR 的开源模型都放在 [**cnstd-cnocr-models**](https://huggingface.co/breezedeus/cnstd-cnocr-models) ([国内镜像](https://hf-mirror.com/breezedeus/cnstd-cnocr-models))项目中,可免费下载使用。
|
| 56 |
+
如果下载太慢,也可以从 [百度云盘](https://pan.baidu.com/s/1RhLBf8DcLnLuGLPrp89hUg?pwd=nocr) 下载, 提取码为 `nocr`。具体方法可参考 [CnOCR在线文档/使用方法](https://cnocr.readthedocs.io/zh-cn/latest/usage) 。
|
| 57 |
+
|
| 58 |
+
CnOCR 中的文字检测引擎使用的是 [CnSTD](https://github.com/breezedeus/cnstd),
|
| 59 |
+
如果系统无法自动成功下载模型文件,则需要手动从 [**cnstd-cnocr-models**](https://huggingface.co/breezedeus/cnstd-cnocr-models) ([国内镜像](https://hf-mirror.com/breezedeus/cnstd-cnocr-models))项目中下载,或者从[百度云盘](https://pan.baidu.com/s/1zDMzArCDrrXHWL0AWxwYQQ?pwd=nstd)(提取码为 `nstd`)下载对应的zip文件并把它存放于 `~/.cnstd/1.2`(Windows下为 `C:\Users\<username>\AppData\Roaming\cnstd\1.2`)目录中。
|
| 60 |
+
|
| 61 |
+
关于 CnOCR 模型的更多信息请参考 [CnOCR在线文档/可用模型](https://cnocr.readthedocs.io/zh-cn/latest/models)。
|
| 62 |
+
|
| 63 |
+
CnOCR 也提供**高级版的付费模型**,具体参考本文末尾的说明。
|
| 64 |
+
|
| 65 |
+
- CnOCR 付费模型:具体参考 [CnOCR详细资料 | Breezedeus.com](https://www.breezedeus.com/article/cnocr)。
|
| 66 |
+
|
| 67 |
+
<br/>
|
| 68 |
+
|
| 69 |
+
EasyOCR 模型下载请参考 [EasyOCR](https://github.com/JaidedAI/EasyOCR)。
|
| 70 |
+
|
| 71 |
+
## 高级版付费模型
|
| 72 |
+
|
| 73 |
+
除基础模型外,Pix2Text 还提供了以下模型的高级付费版:
|
| 74 |
+
|
| 75 |
+
- MFD 和 MFR 付费模型:具体参考 [P2T详细资料 | Breezedeus.com](https://www.breezedeus.com/article/pix2text_cn)。
|
| 76 |
+
- CnOCR 付费模型:具体参考 [CnOCR详细资料 | Breezedeus.com](https://www.breezedeus.com/article/cnocr)。
|
| 77 |
+
|
| 78 |
+
> 注意,付费模型包含不同的 license 版本,购买时请参考具体的产品说明。
|
| 79 |
+
|
| 80 |
+
建议购买前首先使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内 Demo](https://hf-mirror.com/spaces/breezedeus/Pix2Text-Demo))**验证模型效果后再购买**。
|
| 81 |
+
|
| 82 |
+
**模型购买地址**:
|
| 83 |
+
|
| 84 |
+
| 模型名称 | 购买地址 | 说明
|
| 85 |
+
|--------------|------------------------------------------------------------|-----------------------------------------------------------------------------------|
|
| 86 |
+
| MFD pro 模型 | [Lemon Squeezy](https://ocr.lemonsqueezy.com) | 包含企业版和个人版,可开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) |
|
| 87 |
+
| MFD pro 模型 | [B站](https://mall.bilibili.com/neul-next/detailuniversal/detail.html?isMerchant=1&page=detailuniversal_detail&saleType=10&itemsId=11883911&loadingShow=1&noTitleBar=1&msource=merchant_share) | 仅包含个人版,不可商用,不能开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) |
|
| 88 |
+
| MFR pro 模型 | [Lemon Squeezy](https://ocr.lemonsqueezy.com) | 包含企业版和个人版,可开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) |
|
| 89 |
+
| MFR pro 模型 | [B站](https://mall.bilibili.com/neul-next/detailuniversal/detail.html?isMerchant=1&page=detailuniversal_detail&saleType=10&itemsId=11884166&loadingShow=1&noTitleBar=1&msource=merchant_share) | 仅包含个人版,不可商用,不能开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) |
|
| 90 |
+
| CnOCR pro 模型 | [Lemon Squeezy](https://ocr.lemonsqueezy.com) | 包含企业版和个人版,可开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) 和 [CnOCR详细资料](https://www.breezedeus.com/article/cnocr) |
|
| 91 |
+
| CnOCR pro 模型 | [B站](https://mall.bilibili.com/neul-next/detailuniversal/detail.html?isMerchant=1&page=detailuniversal_detail&saleType=10&itemsId=11884138&loadingShow=1&noTitleBar=1&msource=merchant_share) | 仅包含个人版,不可商用,不能开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) 和 [CnOCR详细资料](https://www.breezedeus.com/article/cnocr) |
|
| 92 |
+
|
| 93 |
+
购买过程遇到问题可以扫码加小助手为好友进行沟通,备注 `p2t`,小助手会尽快答复:
|
| 94 |
+
|
| 95 |
+
<figure markdown>
|
| 96 |
+
{: style="width:270px"}
|
| 97 |
+
</figure>
|
| 98 |
+
|
| 99 |
+
更多联系方式见 [交流群](contact.md)。
|
docs/pix2text/latex_ocr.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
:::pix2text.latex_ocr
|
docs/pix2text/pix_to_text.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
:::pix2text.pix_to_text
|
docs/pix2text/table_ocr.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
:::pix2text.table_ocr
|
docs/pix2text/text_formula_ocr.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
:::pix2text.text_formula_ocr
|
docs/requirements.txt
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# This file is autogenerated by pip-compile with Python 3.9
|
| 3 |
+
# by the following command:
|
| 4 |
+
#
|
| 5 |
+
# pip-compile --output-file=requirements.txt requirements.in
|
| 6 |
+
#
|
| 7 |
+
#--index-url https://mirrors.aliyun.com/pypi/simple
|
| 8 |
+
#--extra-index-url https://pypi.tuna.tsinghua.edu.cn/simple
|
| 9 |
+
#--extra-index-url https://pypi.org/simple
|
| 10 |
+
|
| 11 |
+
aiohttp==3.9.3
|
| 12 |
+
# via
|
| 13 |
+
# datasets
|
| 14 |
+
# fsspec
|
| 15 |
+
aiosignal==1.3.1
|
| 16 |
+
# via aiohttp
|
| 17 |
+
appdirs==1.4.4
|
| 18 |
+
# via wandb
|
| 19 |
+
async-timeout==4.0.3
|
| 20 |
+
# via aiohttp
|
| 21 |
+
attrs==23.2.0
|
| 22 |
+
# via aiohttp
|
| 23 |
+
certifi==2024.2.2
|
| 24 |
+
# via
|
| 25 |
+
# requests
|
| 26 |
+
# sentry-sdk
|
| 27 |
+
charset-normalizer==3.3.2
|
| 28 |
+
# via requests
|
| 29 |
+
click==8.1.7
|
| 30 |
+
# via
|
| 31 |
+
# -r requirements.in
|
| 32 |
+
# cnocr
|
| 33 |
+
# cnstd
|
| 34 |
+
# wandb
|
| 35 |
+
cnocr[ort-cpu]==2.3.0.2
|
| 36 |
+
# via
|
| 37 |
+
# -r requirements.in
|
| 38 |
+
# cnocr
|
| 39 |
+
cnstd==1.2.4.1
|
| 40 |
+
# via
|
| 41 |
+
# -r requirements.in
|
| 42 |
+
# cnocr
|
| 43 |
+
coloredlogs==15.0.1
|
| 44 |
+
# via
|
| 45 |
+
# onnxruntime
|
| 46 |
+
# optimum
|
| 47 |
+
contourpy==1.2.0
|
| 48 |
+
# via matplotlib
|
| 49 |
+
cycler==0.12.1
|
| 50 |
+
# via matplotlib
|
| 51 |
+
datasets==2.17.0
|
| 52 |
+
# via
|
| 53 |
+
# evaluate
|
| 54 |
+
# optimum
|
| 55 |
+
dill==0.3.8
|
| 56 |
+
# via
|
| 57 |
+
# datasets
|
| 58 |
+
# evaluate
|
| 59 |
+
# multiprocess
|
| 60 |
+
docker-pycreds==0.4.0
|
| 61 |
+
# via wandb
|
| 62 |
+
easyocr==1.7.1
|
| 63 |
+
# via -r requirements.in
|
| 64 |
+
evaluate==0.4.1
|
| 65 |
+
# via optimum
|
| 66 |
+
filelock==3.13.1
|
| 67 |
+
# via
|
| 68 |
+
# datasets
|
| 69 |
+
# huggingface-hub
|
| 70 |
+
# torch
|
| 71 |
+
# transformers
|
| 72 |
+
flatbuffers==23.5.26
|
| 73 |
+
# via onnxruntime
|
| 74 |
+
fonttools==4.49.0
|
| 75 |
+
# via matplotlib
|
| 76 |
+
frozenlist==1.4.1
|
| 77 |
+
# via
|
| 78 |
+
# aiohttp
|
| 79 |
+
# aiosignal
|
| 80 |
+
fsspec[http]==2023.10.0
|
| 81 |
+
# via
|
| 82 |
+
# datasets
|
| 83 |
+
# evaluate
|
| 84 |
+
# huggingface-hub
|
| 85 |
+
# pytorch-lightning
|
| 86 |
+
# torch
|
| 87 |
+
gitdb==4.0.11
|
| 88 |
+
# via gitpython
|
| 89 |
+
gitpython==3.1.42
|
| 90 |
+
# via wandb
|
| 91 |
+
huggingface-hub==0.20.3
|
| 92 |
+
# via
|
| 93 |
+
# cnstd
|
| 94 |
+
# datasets
|
| 95 |
+
# evaluate
|
| 96 |
+
# optimum
|
| 97 |
+
# tokenizers
|
| 98 |
+
# transformers
|
| 99 |
+
humanfriendly==10.0
|
| 100 |
+
# via coloredlogs
|
| 101 |
+
idna==3.6
|
| 102 |
+
# via
|
| 103 |
+
# requests
|
| 104 |
+
# yarl
|
| 105 |
+
imageio==2.34.0
|
| 106 |
+
# via scikit-image
|
| 107 |
+
importlib-resources==6.1.1
|
| 108 |
+
# via matplotlib
|
| 109 |
+
jinja2==3.0.3
|
| 110 |
+
# via torch
|
| 111 |
+
kiwisolver==1.4.5
|
| 112 |
+
# via matplotlib
|
| 113 |
+
lazy-loader==0.3
|
| 114 |
+
# via scikit-image
|
| 115 |
+
lightning-utilities==0.10.1
|
| 116 |
+
# via
|
| 117 |
+
# pytorch-lightning
|
| 118 |
+
# torchmetrics
|
| 119 |
+
markupsafe==2.1.5
|
| 120 |
+
# via jinja2
|
| 121 |
+
matplotlib==3.8.3
|
| 122 |
+
# via
|
| 123 |
+
# cnstd
|
| 124 |
+
# seaborn
|
| 125 |
+
mpmath==1.3.0
|
| 126 |
+
# via sympy
|
| 127 |
+
multidict==6.0.5
|
| 128 |
+
# via
|
| 129 |
+
# aiohttp
|
| 130 |
+
# yarl
|
| 131 |
+
multiprocess==0.70.16
|
| 132 |
+
# via
|
| 133 |
+
# datasets
|
| 134 |
+
# evaluate
|
| 135 |
+
networkx==3.2.1
|
| 136 |
+
# via
|
| 137 |
+
# scikit-image
|
| 138 |
+
# torch
|
| 139 |
+
ninja==1.11.1.1
|
| 140 |
+
# via easyocr
|
| 141 |
+
numpy==1.26.4
|
| 142 |
+
# via
|
| 143 |
+
# -r requirements.in
|
| 144 |
+
# cnocr
|
| 145 |
+
# cnstd
|
| 146 |
+
# contourpy
|
| 147 |
+
# datasets
|
| 148 |
+
# easyocr
|
| 149 |
+
# evaluate
|
| 150 |
+
# imageio
|
| 151 |
+
# matplotlib
|
| 152 |
+
# onnx
|
| 153 |
+
# onnxruntime
|
| 154 |
+
# opencv-python
|
| 155 |
+
# opencv-python-headless
|
| 156 |
+
# optimum
|
| 157 |
+
# pandas
|
| 158 |
+
# pyarrow
|
| 159 |
+
# pytorch-lightning
|
| 160 |
+
# scikit-image
|
| 161 |
+
# scipy
|
| 162 |
+
# seaborn
|
| 163 |
+
# shapely
|
| 164 |
+
# tifffile
|
| 165 |
+
# torchmetrics
|
| 166 |
+
# torchvision
|
| 167 |
+
# transformers
|
| 168 |
+
onnx==1.15.0
|
| 169 |
+
# via
|
| 170 |
+
# cnocr
|
| 171 |
+
# cnstd
|
| 172 |
+
# optimum
|
| 173 |
+
onnxruntime==1.17.0
|
| 174 |
+
# via
|
| 175 |
+
# cnocr
|
| 176 |
+
# optimum
|
| 177 |
+
opencv-python==4.9.0.80
|
| 178 |
+
# via
|
| 179 |
+
# -r requirements.in
|
| 180 |
+
# cnstd
|
| 181 |
+
opencv-python-headless==4.9.0.80
|
| 182 |
+
# via easyocr
|
| 183 |
+
optimum[onnxruntime]==1.16.2
|
| 184 |
+
# via -r requirements.in
|
| 185 |
+
packaging==23.2
|
| 186 |
+
# via
|
| 187 |
+
# datasets
|
| 188 |
+
# evaluate
|
| 189 |
+
# huggingface-hub
|
| 190 |
+
# lightning-utilities
|
| 191 |
+
# matplotlib
|
| 192 |
+
# onnxruntime
|
| 193 |
+
# optimum
|
| 194 |
+
# pytorch-lightning
|
| 195 |
+
# scikit-image
|
| 196 |
+
# torchmetrics
|
| 197 |
+
# transformers
|
| 198 |
+
pandas==2.2.0
|
| 199 |
+
# via
|
| 200 |
+
# cnstd
|
| 201 |
+
# datasets
|
| 202 |
+
# evaluate
|
| 203 |
+
# seaborn
|
| 204 |
+
pillow==10.2.0
|
| 205 |
+
# via
|
| 206 |
+
# -r requirements.in
|
| 207 |
+
# cnocr
|
| 208 |
+
# cnstd
|
| 209 |
+
# easyocr
|
| 210 |
+
# imageio
|
| 211 |
+
# matplotlib
|
| 212 |
+
# scikit-image
|
| 213 |
+
# torchvision
|
| 214 |
+
polygon3==3.0.9.1
|
| 215 |
+
# via cnstd
|
| 216 |
+
protobuf==4.25.3
|
| 217 |
+
# via
|
| 218 |
+
# onnx
|
| 219 |
+
# onnxruntime
|
| 220 |
+
# optimum
|
| 221 |
+
# transformers
|
| 222 |
+
# wandb
|
| 223 |
+
psutil==5.9.8
|
| 224 |
+
# via wandb
|
| 225 |
+
pyarrow==15.0.0
|
| 226 |
+
# via datasets
|
| 227 |
+
pyarrow-hotfix==0.6
|
| 228 |
+
# via datasets
|
| 229 |
+
pyclipper==1.3.0.post5
|
| 230 |
+
# via
|
| 231 |
+
# cnstd
|
| 232 |
+
# easyocr
|
| 233 |
+
pymupdf==1.24.1
|
| 234 |
+
# via -r requirements.in
|
| 235 |
+
pymupdfb==1.24.1
|
| 236 |
+
# via pymupdf
|
| 237 |
+
pyparsing==3.1.1
|
| 238 |
+
# via matplotlib
|
| 239 |
+
pyspellchecker==0.8.1
|
| 240 |
+
# via -r requirements.in
|
| 241 |
+
python-bidi==0.4.2
|
| 242 |
+
# via easyocr
|
| 243 |
+
python-dateutil==2.8.2
|
| 244 |
+
# via
|
| 245 |
+
# matplotlib
|
| 246 |
+
# pandas
|
| 247 |
+
pytorch-lightning==2.2.0.post0
|
| 248 |
+
# via
|
| 249 |
+
# cnocr
|
| 250 |
+
# cnstd
|
| 251 |
+
pytz==2024.1
|
| 252 |
+
# via pandas
|
| 253 |
+
pyyaml==6.0.1
|
| 254 |
+
# via
|
| 255 |
+
# cnstd
|
| 256 |
+
# datasets
|
| 257 |
+
# easyocr
|
| 258 |
+
# huggingface-hub
|
| 259 |
+
# pytorch-lightning
|
| 260 |
+
# transformers
|
| 261 |
+
# wandb
|
| 262 |
+
regex==2023.12.25
|
| 263 |
+
# via transformers
|
| 264 |
+
requests==2.31.0
|
| 265 |
+
# via
|
| 266 |
+
# datasets
|
| 267 |
+
# evaluate
|
| 268 |
+
# fsspec
|
| 269 |
+
# huggingface-hub
|
| 270 |
+
# responses
|
| 271 |
+
# torchvision
|
| 272 |
+
# transformers
|
| 273 |
+
# wandb
|
| 274 |
+
responses==0.18.0
|
| 275 |
+
# via evaluate
|
| 276 |
+
safetensors==0.4.2
|
| 277 |
+
# via transformers
|
| 278 |
+
scikit-image==0.22.0
|
| 279 |
+
# via easyocr
|
| 280 |
+
scipy==1.12.0
|
| 281 |
+
# via
|
| 282 |
+
# cnstd
|
| 283 |
+
# easyocr
|
| 284 |
+
# scikit-image
|
| 285 |
+
seaborn==0.13.2
|
| 286 |
+
# via cnstd
|
| 287 |
+
sentencepiece==0.1.99
|
| 288 |
+
# via transformers
|
| 289 |
+
sentry-sdk==1.40.4
|
| 290 |
+
# via wandb
|
| 291 |
+
setproctitle==1.3.3
|
| 292 |
+
# via wandb
|
| 293 |
+
shapely==2.0.2
|
| 294 |
+
# via
|
| 295 |
+
# cnstd
|
| 296 |
+
# easyocr
|
| 297 |
+
six==1.16.0
|
| 298 |
+
# via
|
| 299 |
+
# docker-pycreds
|
| 300 |
+
# python-bidi
|
| 301 |
+
# python-dateutil
|
| 302 |
+
smmap==5.0.1
|
| 303 |
+
# via gitdb
|
| 304 |
+
sympy==1.12
|
| 305 |
+
# via
|
| 306 |
+
# onnxruntime
|
| 307 |
+
# optimum
|
| 308 |
+
# torch
|
| 309 |
+
tifffile==2024.2.12
|
| 310 |
+
# via scikit-image
|
| 311 |
+
tokenizers==0.15.2
|
| 312 |
+
# via transformers
|
| 313 |
+
torch==2.2.0
|
| 314 |
+
# via
|
| 315 |
+
# -r requirements.in
|
| 316 |
+
# cnocr
|
| 317 |
+
# cnstd
|
| 318 |
+
# easyocr
|
| 319 |
+
# optimum
|
| 320 |
+
# pytorch-lightning
|
| 321 |
+
# torchmetrics
|
| 322 |
+
# torchvision
|
| 323 |
+
torchmetrics==1.3.1
|
| 324 |
+
# via
|
| 325 |
+
# cnocr
|
| 326 |
+
# pytorch-lightning
|
| 327 |
+
torchvision==0.17.0
|
| 328 |
+
# via
|
| 329 |
+
# -r requirements.in
|
| 330 |
+
# cnocr
|
| 331 |
+
# cnstd
|
| 332 |
+
# easyocr
|
| 333 |
+
tqdm==4.66.2
|
| 334 |
+
# via
|
| 335 |
+
# -r requirements.in
|
| 336 |
+
# cnocr
|
| 337 |
+
# cnstd
|
| 338 |
+
# datasets
|
| 339 |
+
# evaluate
|
| 340 |
+
# huggingface-hub
|
| 341 |
+
# pytorch-lightning
|
| 342 |
+
# transformers
|
| 343 |
+
transformers[sentencepiece]==4.37.2
|
| 344 |
+
# via
|
| 345 |
+
# -r requirements.in
|
| 346 |
+
# optimum
|
| 347 |
+
typing-extensions==4.9.0
|
| 348 |
+
# via
|
| 349 |
+
# huggingface-hub
|
| 350 |
+
# lightning-utilities
|
| 351 |
+
# pytorch-lightning
|
| 352 |
+
# torch
|
| 353 |
+
# wandb
|
| 354 |
+
tzdata==2024.1
|
| 355 |
+
# via pandas
|
| 356 |
+
unidecode==1.3.8
|
| 357 |
+
# via cnstd
|
| 358 |
+
urllib3==2.2.0
|
| 359 |
+
# via
|
| 360 |
+
# requests
|
| 361 |
+
# responses
|
| 362 |
+
# sentry-sdk
|
| 363 |
+
wandb==0.16.3
|
| 364 |
+
# via cnocr
|
| 365 |
+
xxhash==3.4.1
|
| 366 |
+
# via
|
| 367 |
+
# datasets
|
| 368 |
+
# evaluate
|
| 369 |
+
yarl==1.9.4
|
| 370 |
+
# via aiohttp
|
| 371 |
+
zipp==3.17.0
|
| 372 |
+
# via importlib-resources
|
| 373 |
+
|
| 374 |
+
doclayout-yolo<0.1
|
| 375 |
+
litellm<2.0
|
| 376 |
+
|
| 377 |
+
# The following packages are considered to be unsafe in a requirements file:
|
| 378 |
+
# setuptools
|
| 379 |
+
|
| 380 |
+
# for mkdocs
|
| 381 |
+
pygments==2.11
|
| 382 |
+
jinja2<3.1.0
|
| 383 |
+
mkdocs==1.2.2
|
| 384 |
+
mkdocs-macros-plugin==0.6.0
|
| 385 |
+
mkdocs-material==7.3.0
|
| 386 |
+
mkdocs-material-extensions==1.0.3
|
| 387 |
+
mkdocstrings==0.16.1
|
docs/train.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Model Train
|
| 2 |
+
|
| 3 |
+
TODO
|
docs/usage.md
ADDED
|
@@ -0,0 +1,547 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Usage
|
| 2 |
+
|
| 3 |
+
## 模型文件自动下载
|
| 4 |
+
|
| 5 |
+
首次使用 **Pix2Text** 时,系统会**自动下载**所需的开源模型,并存于 `~/.pix2text` 目录(Windows下默认路径为 `C:\Users\<username>\AppData\Roaming\pix2text`)。
|
| 6 |
+
CnOCR 和 CnSTD 中的模型分别存于 `~/.cnocr` 和 `~/.cnstd` 中(Windows 下默认路径为 `C:\Users\<username>\AppData\Roaming\cnocr` 和 `C:\Users\<username>\AppData\Roaming\cnstd`)。
|
| 7 |
+
下载过程请耐心等待,无法科学上网时系统会自动尝试其他可用站点进行下载,所以可能需要等待较长时间。
|
| 8 |
+
对于没有网络连接的机器,可以先把模型下载到其他机器上,然后拷贝到对应目录。
|
| 9 |
+
|
| 10 |
+
如果系统无法自动成功下载模型文件,则需要手动下载模型文件,可以参考 [huggingface.co/breezedeus](https://huggingface.co/breezedeus) ([国内镜像](https://hf-mirror.com/breezedeus))自己手动下载。
|
| 11 |
+
|
| 12 |
+
具体说明见 [模型下载](models.md)。
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
## 初始化
|
| 16 |
+
### 方法一
|
| 17 |
+
|
| 18 |
+
类 [Pix2Text](pix2text/pix_to_text.md) 是识别主类,包含了多个识别函数识别不同类型的 **图片** 或 **PDF文件** 中的内容。类 `Pix2Text` 的初始化函数如下:
|
| 19 |
+
|
| 20 |
+
```python
|
| 21 |
+
class Pix2Text(object):
|
| 22 |
+
def __init__(
|
| 23 |
+
self,
|
| 24 |
+
*,
|
| 25 |
+
layout_parser: Optional[LayoutParser] = None,
|
| 26 |
+
text_formula_ocr: Optional[TextFormulaOCR] = None,
|
| 27 |
+
table_ocr: Optional[TableOCR] = None,
|
| 28 |
+
**kwargs,
|
| 29 |
+
):
|
| 30 |
+
"""
|
| 31 |
+
Initialize the Pix2Text object.
|
| 32 |
+
Args:
|
| 33 |
+
layout_parser (LayoutParser): The layout parser object; default value is `None`, which means to create a default one
|
| 34 |
+
text_formula_ocr (TextFormulaOCR): The text and formula OCR object; default value is `None`, which means to create a default one
|
| 35 |
+
table_ocr (TableOCR): The table OCR object; default value is `None`, which means not to recognize tables
|
| 36 |
+
**kwargs (dict): Other arguments, currently not used
|
| 37 |
+
"""
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
其中的几个参数含义如下:
|
| 41 |
+
|
| 42 |
+
* `layout_parser`:版面分析模型对象,默认值为 `None`,表示使用默认的版面分析模型;
|
| 43 |
+
* `text_formula_ocr`:文字与公式识别模型对象,默认值为 `None`,表示使用默认的文字与公式识别模型;
|
| 44 |
+
* `table_ocr`:表格识别模型对象,默认值为 `None`,表示不识别表格;
|
| 45 |
+
* `**kwargs`:其他参数,目前未使用。
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
每个参数都有默认取值,所以可以不传入任何参数值进行初始化:`p2t = Pix2Text()`。但请注意,如果不传入任何参数值,那么只会导入默认的版面分析模型和文字与公式识别模型,而**不会导入表格识别模型**。
|
| 49 |
+
|
| 50 |
+
初始化 Pix2Text 实例的更好的方法是使用以下的函数。
|
| 51 |
+
|
| 52 |
+
### 方法二
|
| 53 |
+
可以通过指定配置信息来初始化 `Pix2Text` 类的实例:
|
| 54 |
+
|
| 55 |
+
```python
|
| 56 |
+
@classmethod
|
| 57 |
+
def from_config(
|
| 58 |
+
cls,
|
| 59 |
+
total_configs: Optional[dict] = None,
|
| 60 |
+
enable_formula: bool = True,
|
| 61 |
+
enable_table: bool = True,
|
| 62 |
+
device: str = None,
|
| 63 |
+
**kwargs,
|
| 64 |
+
):
|
| 65 |
+
"""
|
| 66 |
+
Create a Pix2Text object from the configuration.
|
| 67 |
+
Args:
|
| 68 |
+
total_configs (dict): The total configuration; default value is `None`, which means to use the default configuration.
|
| 69 |
+
If not None, it should contain the following keys:
|
| 70 |
+
|
| 71 |
+
* `layout`: The layout parser configuration
|
| 72 |
+
* `text_formula`: The TextFormulaOCR configuration
|
| 73 |
+
* `table`: The table OCR configuration
|
| 74 |
+
enable_formula (bool): Whether to enable formula recognition; default value is `True`
|
| 75 |
+
enable_table (bool): Whether to enable table recognition; default value is `True`
|
| 76 |
+
device (str): The device to run the model; optional values are 'cpu', 'gpu' or 'cuda';
|
| 77 |
+
default value is `None`, which means to select the device automatically
|
| 78 |
+
**kwargs (dict): Other arguments
|
| 79 |
+
|
| 80 |
+
Returns: a Pix2Text object
|
| 81 |
+
|
| 82 |
+
"""
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
其中的几个参数含义如下:
|
| 86 |
+
|
| 87 |
+
* `total_configs`:总配置,包含以下几个键值:
|
| 88 |
+
- `layout`:版面分析模型的配置;
|
| 89 |
+
- `text_formula`:文字与公式识别模型的配置;
|
| 90 |
+
- `table`:表格识别模型的配置;
|
| 91 |
+
默认值为 `None`,表示使用默认配置。
|
| 92 |
+
* `enable_formula`:是否启用公式识别,默认值为 `True`;
|
| 93 |
+
* `enable_table`:是否启用表格识别,默认值为 `True`;
|
| 94 |
+
* `device`:运行模型的设备,可选值为 `'cpu'`, `'gpu'` 或 `'cuda'`,默认值为 `None`,表示自动选择设备;
|
| 95 |
+
* `**kwargs`:其他参数,目前未使用。
|
| 96 |
+
|
| 97 |
+
这个函数的返回值是一个 `Pix2Text` 类的实例,可以直接使用这个实例进行识别。
|
| 98 |
+
|
| 99 |
+
推荐使用此函数初始化 Pix2Text 的实例,如:`p2t = Pix2Text.from_config()`。
|
| 100 |
+
|
| 101 |
+
一个包含配置信息的示例如下:
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
import os
|
| 105 |
+
from pix2text import Pix2Text
|
| 106 |
+
|
| 107 |
+
text_formula_config = dict(
|
| 108 |
+
languages=('en', 'ch_sim'), # 设置识别的语言
|
| 109 |
+
mfd=dict( # 声明 MFD 的初始化参数
|
| 110 |
+
model_path=os.path.expanduser(
|
| 111 |
+
'~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx'
|
| 112 |
+
), # 注:修改成你的模型文件所存储的路径
|
| 113 |
+
),
|
| 114 |
+
formula=dict(
|
| 115 |
+
model_name='mfr-pro',
|
| 116 |
+
model_backend='onnx',
|
| 117 |
+
model_dir=os.path.expanduser(
|
| 118 |
+
'~/.pix2text/1.1/mfr-pro-onnx'
|
| 119 |
+
), # 注:修改成你的模型文件所存储的路径
|
| 120 |
+
),
|
| 121 |
+
text=dict(
|
| 122 |
+
rec_model_name='doc-densenet_lite_666-gru_large',
|
| 123 |
+
rec_model_backend='onnx',
|
| 124 |
+
rec_model_fp=os.path.expanduser(
|
| 125 |
+
'~/.cnocr/2.3/doc-densenet_lite_666-gru_large/cnocr-v2.3-doc-densenet_lite_666-gru_large-epoch=005-ft-model.onnx'
|
| 126 |
+
# noqa
|
| 127 |
+
), # 注:修改成你的模型文件所存储的路径
|
| 128 |
+
),
|
| 129 |
+
)
|
| 130 |
+
total_config = {
|
| 131 |
+
'layout': {'scores_thresh': 0.45},
|
| 132 |
+
'text_formula': text_formula_config,
|
| 133 |
+
}
|
| 134 |
+
p2t = Pix2Text.from_config(total_configs=total_config)
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
使用 VLM API 做文字和公式识别的示例如下:
|
| 138 |
+
|
| 139 |
+
```python
|
| 140 |
+
import os
|
| 141 |
+
from pix2text import Pix2Text
|
| 142 |
+
|
| 143 |
+
model_name=os.getenv("GEMINI_MODEL") # "gemini/gemini-2.0-flash-lite"
|
| 144 |
+
api_key=os.getenv("GEMINI_API_KEY") # "<your-api-key>"
|
| 145 |
+
|
| 146 |
+
total_config = {
|
| 147 |
+
'layout': None,
|
| 148 |
+
'text_formula': {
|
| 149 |
+
"model_type": "VlmTextFormulaOCR", # 指定类名
|
| 150 |
+
"model_name": model_name,
|
| 151 |
+
"api_key": api_key,
|
| 152 |
+
},
|
| 153 |
+
"table": {
|
| 154 |
+
"model_type": "VlmTableOCR", # 指定类名
|
| 155 |
+
"model_name": model_name,
|
| 156 |
+
"api_key": api_key,
|
| 157 |
+
},
|
| 158 |
+
}
|
| 159 |
+
p2t = Pix2Text.from_config(total_configs=total_config)
|
| 160 |
+
```
|
| 161 |
+
`model_name` 和 `api_key` 的取值,具体可参考 [LiteLLM 文档](https://docs.litellm.ai/docs/)。
|
| 162 |
+
|
| 163 |
+
更多初始化的示例请参见 [tests/test_pix2text.py](https://github.com/breezedeus/Pix2Text/blob/main/tests/test_pix2text.py)。
|
| 164 |
+
|
| 165 |
+
## 各种识别接口
|
| 166 |
+
|
| 167 |
+
类 `Pix2Text` 提供了不同的识别函数来识别不同类型的图片或者 PDF 文件内容,下面分别说明。
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
### 1. 函数 `.recognize_pdf()`
|
| 171 |
+
|
| 172 |
+
此函数用于识别一整个 PDF 文件中的内容。**PDF 文件的内容可以只包含图片而无文字内容**,
|
| 173 |
+
如示例文件 [examples/test-doc.pdf](examples/test-doc.pdf)。
|
| 174 |
+
识别时,可以指定识别的页数,也可以指定识别的 PDF 文件编号。
|
| 175 |
+
函数定义如下:
|
| 176 |
+
|
| 177 |
+
```python
|
| 178 |
+
def recognize_pdf(
|
| 179 |
+
self,
|
| 180 |
+
pdf_fp: Union[str, Path],
|
| 181 |
+
pdf_number: int = 0,
|
| 182 |
+
pdf_id: Optional[str] = None,
|
| 183 |
+
page_numbers: Optional[List[int]] = None,
|
| 184 |
+
**kwargs,
|
| 185 |
+
) -> Document:
|
| 186 |
+
"""
|
| 187 |
+
recognize a pdf file
|
| 188 |
+
Args:
|
| 189 |
+
pdf_fp (Union[str, Path]): pdf file path
|
| 190 |
+
pdf_number (int): pdf number
|
| 191 |
+
pdf_id (str): pdf id
|
| 192 |
+
page_numbers (List[int]): page numbers to recognize; default is `None`, which means to recognize all pages
|
| 193 |
+
kwargs (dict): Optional keyword arguments. The same as `recognize_page`
|
| 194 |
+
|
| 195 |
+
Returns: a Document object. Use `doc.to_markdown('output-dir')` to get the markdown output of the recognized document.
|
| 196 |
+
|
| 197 |
+
"""
|
| 198 |
+
```
|
| 199 |
+
|
| 200 |
+
**函数说明**:
|
| 201 |
+
|
| 202 |
+
* 输入参数 `pdf_fp`:PDF 文件的路径;
|
| 203 |
+
* 输入参数 `pdf_number`:PDF 文件的编号,默认值为 `0`;
|
| 204 |
+
* 输入参数 `pdf_id`:PDF 文件的 ID,默认值为 `None`;
|
| 205 |
+
* 输入参数 `page_numbers`:需要识别的页码列表(页码从 0 开始计数,如 `[0, 1]` 表示只识别文件的第 1、2 页内容),默认值为 `None`,表示识别所有页;
|
| 206 |
+
* 输入参数 `**kwargs`:其他参数,具体说明参考下面的函数 `recognize_page()`。
|
| 207 |
+
|
| 208 |
+
**返回值**:返回一个 `Document` 对象,可以使用 `doc.to_markdown('output-dir')` 来获取识别结果的 markdown 输出。
|
| 209 |
+
|
| 210 |
+
**调用示例**:
|
| 211 |
+
|
| 212 |
+
```python
|
| 213 |
+
from pix2text import Pix2Text
|
| 214 |
+
|
| 215 |
+
img_fp = 'examples/test-doc.pdf'
|
| 216 |
+
p2t = Pix2Text.from_config()
|
| 217 |
+
out_md = p2t.recognize_pdf(
|
| 218 |
+
img_fp,
|
| 219 |
+
page_numbers=[0, 1],
|
| 220 |
+
table_as_image=True,
|
| 221 |
+
save_debug_res=f'./output-debug',
|
| 222 |
+
)
|
| 223 |
+
out_md.to_markdown('output-pdf-md')
|
| 224 |
+
```
|
| 225 |
+
|
| 226 |
+
### 2. 函数 `.recognize_page()`
|
| 227 |
+
|
| 228 |
+
此函数用于识别一张包含复杂排版的页面图片中的内容。图片可以包含多列、图片、表格等内容,如示例图片 [examples/page2.png](examples/page2.png)。
|
| 229 |
+
函数定义如下:
|
| 230 |
+
|
| 231 |
+
```python
|
| 232 |
+
def recognize_page(
|
| 233 |
+
self,
|
| 234 |
+
img: Union[str, Path, Image.Image],
|
| 235 |
+
page_number: int = 0,
|
| 236 |
+
page_id: Optional[str] = None,
|
| 237 |
+
**kwargs,
|
| 238 |
+
) -> Page:
|
| 239 |
+
"""
|
| 240 |
+
Analyze the layout of the image, and then recognize the information contained in each section.
|
| 241 |
+
|
| 242 |
+
Args:
|
| 243 |
+
img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()`
|
| 244 |
+
        page_number (int): page number; default value is `0`
|
| 245 |
+
page_id (str): page id; default value is `None`, which means to use the `str(page_number)`
|
| 246 |
+
kwargs ():
|
| 247 |
+
* resized_shape (int): Resize the image width to this size for processing; default value is `768`
|
| 248 |
+
* mfr_batch_size (int): batch size for MFR; When running on GPU, this value is suggested to be set to greater than 1; default value is `1`
|
| 249 |
+
* embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')`
|
| 250 |
+
* isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is two-dollar signs
|
| 251 |
+
* line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break
|
| 252 |
+
* auto_line_break (bool): Automatically line break the recognized text; only effective when `return_text` is `True`; default value is `True`
|
| 253 |
+
* det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.3`
|
| 254 |
+
* det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.2`
|
| 255 |
+
* embed_ratio_threshold (float): The overlap threshold for embed formulas and text lines; default value is `0.6`.
|
| 256 |
+
When the overlap between an embed formula and a text line is greater than or equal to this threshold,
|
| 257 |
+
the embed formula and the text line are considered to be on the same line;
|
| 258 |
+
otherwise, they are considered to be on different lines.
|
| 259 |
+
* table_as_image (bool): If `True`, the table will be recognized as an image (don't parse the table content as text) ; default value is `False`
|
| 260 |
+
* title_contain_formula (bool): If `True`, the title of the page will be recognized as a mixed image (text and formula). If `False`, it will be recognized as a text; default value is `False`
|
| 261 |
+
* text_contain_formula (bool): If `True`, the text of the page will be recognized as a mixed image (text and formula). If `False`, it will be recognized as a text; default value is `True`
|
| 262 |
+
* formula_rec_kwargs (dict): generation arguments passed to formula recognizer `latex_ocr`; default value is `{}`
|
| 263 |
+
* save_debug_res (str): if `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save
|
| 264 |
+
|
| 265 |
+
Returns: a Page object. Use `page.to_markdown('output-dir')` to get the markdown output of the recognized page.
|
| 266 |
+
"""
|
| 267 |
+
```
|
| 268 |
+
|
| 269 |
+
**函数说明**:
|
| 270 |
+
|
| 271 |
+
* 输入参数 `img`:图片路径或者 `Image.Image` 对象;
|
| 272 |
+
* 输入参数 `page_number`:页码,默认值为 `0`;
|
| 273 |
+
* 输入参数 `page_id`:页码 ID,默认值为 `None`,此时会使用 `str(page_number)` 作为其取值;
|
| 274 |
+
* kwargs:其他参数,具体说明如下:
|
| 275 |
+
- `resized_shape`:调整图片的宽度为此大小以进行处理,默认值为 `768`;
|
| 276 |
+
- `mfr_batch_size`:MFR 预测时使用的批大小;在 GPU 上运行时,建议将此值设置为大于 `1`;默认值为 `1`;
|
| 277 |
+
- `embed_sep`:嵌入 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为 `(' $', '$ ')`;
|
| 278 |
+
- `isolated_sep`:孤立 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为两个美元符号;
|
| 279 |
+
- `line_sep`:文本行之间的分隔符;仅在 `return_text` 为 `True` 时有效;默认值为换行符;
|
| 280 |
+
- `auto_line_break`:自动换行识别的文本;仅在 `return_text` 为 `True` 时有效;默认值为 `True`;
|
| 281 |
+
- `det_text_bbox_max_width_expand_ratio`:扩展检测文本框的宽度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.3`;
|
| 282 |
+
- `det_text_bbox_max_height_expand_ratio`:扩展检测文本框的高度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.2`;
|
| 283 |
+
- `embed_ratio_threshold`:嵌入公式和文本行之间的重叠阈值;默认值为 `0.6`。当嵌入公式和文本行之间的重叠大于或等于此阈值时,认为嵌入公式和文本行在同一行;否则,认为它们在不同行
|
| 284 |
+
- `table_as_image`:如果为 `True`,则将表格识别为图像(不将表格内容解析为文本);默认值为 `False`
|
| 285 |
+
    - `title_contain_formula`:如果为 `True`,则将页面标题作为混合图像(文本和公式)进行识别。如果为 `False`,则将其作为文本图片进行识别(不识别公式);默认值为 `False`
|
| 286 |
+
- `text_contain_formula`:如果为 `True`,则将页面文本作为混合图像(文本和公式)进行识别。如果为 `False`,则将其作为文本进行识别(不识别公式);默认值为 `True`
|
| 287 |
+
- `formula_rec_kwargs`:传递给公式识别器 `latex_ocr` 的生成参数;默认值为 `{}`
|
| 288 |
+
- `save_debug_res`:如果设置了 `save_debug_res`,则把各种中间的解析结果存入此目录以便于调试;默认值为 `None`,表示不保存
|
| 289 |
+
|
| 290 |
+
**返回值**:返回一个 `Page` 对象,可以使用 `page.to_markdown('output-dir')` 来获取识别结果的 markdown 输出。
|
| 291 |
+
|
| 292 |
+
**调用示例**:
|
| 293 |
+
|
| 294 |
+
```python
|
| 295 |
+
from pix2text import Pix2Text
|
| 296 |
+
|
| 297 |
+
img_fp = 'examples/page2.png'
|
| 298 |
+
p2t = Pix2Text.from_config()
|
| 299 |
+
out_page = p2t.recognize_page(
|
| 300 |
+
img_fp,
|
| 301 |
+
title_contain_formula=False,
|
| 302 |
+
text_contain_formula=False,
|
| 303 |
+
save_debug_res=f'./output-debug',
|
| 304 |
+
)
|
| 305 |
+
out_page.to_markdown('output-page-md')
|
| 306 |
+
```
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
### 3. 函数 `.recognize_text_formula()`
|
| 310 |
+
|
| 311 |
+
此函数用于识别一张包含文字和公式的图片(如段落截图)中的内容,如示例图片 [examples/mixed.jpg](examples/mixed.jpg)。
|
| 312 |
+
函数定义如下:
|
| 313 |
+
|
| 314 |
+
```python
|
| 315 |
+
def recognize_text_formula(
|
| 316 |
+
self, img: Union[str, Path, Image.Image], return_text: bool = True, **kwargs,
|
| 317 |
+
) -> Union[str, List[str], List[Any], List[List[Any]]]:
|
| 318 |
+
"""
|
| 319 |
+
Analyze the layout of the image, and then recognize the information contained in each section.
|
| 320 |
+
|
| 321 |
+
Args:
|
| 322 |
+
img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()`
|
| 323 |
+
return_text (bool): Whether to return the recognized text; default value is `True`
|
| 324 |
+
kwargs ():
|
| 325 |
+
* resized_shape (int): Resize the image width to this size for processing; default value is `768`
|
| 326 |
+
* save_analysis_res (str): Save the mfd result image in this file; default is `None`, which means not to save
|
| 327 |
+
* mfr_batch_size (int): batch size for MFR; When running on GPU, this value is suggested to be set to greater than 1; default value is `1`
|
| 328 |
+
* embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')`
|
| 329 |
+
* isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is two-dollar signs
|
| 330 |
+
* line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break
|
| 331 |
+
* auto_line_break (bool): Automatically line break the recognized text; only effective when `return_text` is `True`; default value is `True`
|
| 332 |
+
* det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.3`
|
| 333 |
+
* det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.2`
|
| 334 |
+
* embed_ratio_threshold (float): The overlap threshold for embed formulas and text lines; default value is `0.6`.
|
| 335 |
+
When the overlap between an embed formula and a text line is greater than or equal to this threshold,
|
| 336 |
+
the embed formula and the text line are considered to be on the same line;
|
| 337 |
+
otherwise, they are considered to be on different lines.
|
| 338 |
+
* table_as_image (bool): If `True`, the table will be recognized as an image; default value is `False`
|
| 339 |
+
* formula_rec_kwargs (dict): generation arguments passed to formula recognizer `latex_ocr`; default value is `{}`
|
| 340 |
+
|
| 341 |
+
Returns: a str when `return_text` is `True`; or a list of ordered (top to bottom, left to right) dicts when `return_text` is `False`,
|
| 342 |
+
with each dict representing one detected box, containing keys:
|
| 343 |
+
|
| 344 |
+
* `type`: The category of the image; Optional: 'text', 'isolated', 'embedding'
|
| 345 |
+
* `text`: The recognized text or Latex formula
|
| 346 |
+
* `score`: The confidence score [0, 1]; the higher, the more confident
|
| 347 |
+
* `position`: Position information of the block, `np.ndarray`, with shape of [4, 2]
|
| 348 |
+
* `line_number`: The line number of the box (first line `line_number==0`), boxes with the same value indicate they are on the same line
|
| 349 |
+
|
| 350 |
+
"""
|
| 351 |
+
```
|
| 352 |
+
|
| 353 |
+
**函数说明**:
|
| 354 |
+
|
| 355 |
+
* 输入参数 `img`:图片路径或者 `Image.Image` 对象;
|
| 356 |
+
* 输入参数 `return_text`:是否返回纯文本;取值为 `False` 时返回带有结构化信息的 list;默认值为 `True`;
|
| 357 |
+
* 输入参数 `kwargs`:其他参数,具体说明如下:
|
| 358 |
+
- `resized_shape`:调整图片的宽度为此大小以进行处理,默认值为 `768`;
|
| 359 |
+
- `save_analysis_res`:保存 MFD 解析结果图像的文件名;默认值为 `None`,表示不保存;
|
| 360 |
+
- `mfr_batch_size`:MFR 预测时使用的批大小;在 GPU 上运行时,建议将此值设置为大于 `1`;默认值为 `1`;
|
| 361 |
+
- `embed_sep`:嵌入 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为 `(' $', '$ ')`;
|
| 362 |
+
- `isolated_sep`:孤立 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为两个美元符号;
|
| 363 |
+
- `line_sep`:文本行之间的分隔符;仅在 `return_text` 为 `True` 时有效;默认值为换行符;
|
| 364 |
+
- `auto_line_break`:自动换行识别的文本;仅在 `return_text` 为 `True` 时有效;默认值为 `True`;
|
| 365 |
+
- `det_text_bbox_max_width_expand_ratio`:扩展检测文本框的宽度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.3`;
|
| 366 |
+
- `det_text_bbox_max_height_expand_ratio`:扩展检测文本框的高度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.2`;
|
| 367 |
+
- `embed_ratio_threshold`:嵌入公式和文本行之间的重叠阈值;默认值为 `0.6`。当嵌入公式和文本行之间的重叠大于或等于此阈值时,认为嵌入公式和文本行在同一行;否则,认为它们在不同行
|
| 368 |
+
- `table_as_image`:如果为 `True`,则将表格识别为图像;默认值为 `False`
|
| 369 |
+
- `formula_rec_kwargs`:传递给公式识别器 `latex_ocr` 的生成参数;默认值为 `{}`
|
| 370 |
+
|
| 371 |
+
**返回值**:当 `return_text` 为 `True` 时,返回一个字符串;当 `return_text` 为 `False` 时,返回一个有序的(从上到下,从左到右)字典列表,每个字典表示一个检测框,包含以下键值:
|
| 372 |
+
- `type`:图像的类别;可选值:'text'、'isolated'、'embedding'
|
| 373 |
+
- `text`:识别的文本或 LaTeX 公式
|
| 374 |
+
- `score`:置信度分数 [0, 1];分数越高,置信度越高
|
| 375 |
+
- `position`:块的位置信息,`np.ndarray`,形状为 `[4, 2]`
|
| 376 |
+
- `line_number`:框的行号(第一行 `line_number==0`),具有相同值的框表示它们在同一行
|
| 377 |
+
|
| 378 |
+
**调用示例**:
|
| 379 |
+
|
| 380 |
+
```python
|
| 381 |
+
from pix2text import Pix2Text
|
| 382 |
+
|
| 383 |
+
img_fp = 'examples/mixed.jpg'
|
| 384 |
+
p2t = Pix2Text.from_config()
|
| 385 |
+
out = p2t.recognize_text_formula(
|
| 386 |
+
img_fp,
|
| 387 |
+
save_analysis_res=f'./output-debug',
|
| 388 |
+
)
|
| 389 |
+
```
|
| 390 |
+
|
| 391 |
+
### 4. 函数 `.recognize_formula()`
|
| 392 |
+
|
| 393 |
+
此函数用于识别一张纯公式的图片中的内容,如示例图片 [examples/formula2.png](examples/formula2.png)。
|
| 394 |
+
函数定义如下:
|
| 395 |
+
|
| 396 |
+
```python
|
| 397 |
+
def recognize_formula(
|
| 398 |
+
self,
|
| 399 |
+
imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
|
| 400 |
+
batch_size: int = 1,
|
| 401 |
+
return_text: bool = True,
|
| 402 |
+
rec_config: Optional[dict] = None,
|
| 403 |
+
**kwargs,
|
| 404 |
+
) -> Union[str, List[str], Dict[str, Any], List[Dict[str, Any]]]:
|
| 405 |
+
"""
|
| 406 |
+
Recognize pure Math Formula images to LaTeX Expressions
|
| 407 |
+
Args:
|
| 408 |
+
imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
|
| 409 |
+
batch_size (int): The batch size
|
| 410 |
+
return_text (bool): Whether to return only the recognized text; default value is `True`
|
| 411 |
+
rec_config (Optional[dict]): The config for recognition
|
| 412 |
+
**kwargs (): Special model parameters. Not used for now
|
| 413 |
+
|
| 414 |
+
Returns: The LaTeX Expression or list of LaTeX Expressions;
|
| 415 |
+
str or List[str] when `return_text` is True;
|
| 416 |
+
Dict[str, Any] or List[Dict[str, Any]] when `return_text` is False, with the following keys:
|
| 417 |
+
|
| 418 |
+
* `text`: The recognized LaTeX text
|
| 419 |
+
* `score`: The confidence score [0, 1]; the higher, the more confident
|
| 420 |
+
|
| 421 |
+
"""
|
| 422 |
+
```
|
| 423 |
+
|
| 424 |
+
**函数说明**:
|
| 425 |
+
|
| 426 |
+
* 输入参数 `imgs`:图片路径或者 `Image.Image` 对象,或者图片路径或者 `Image.Image` 对象的列表;
|
| 427 |
+
* 输入参数 `batch_size`:批大小,默认值为 `1`;
|
| 428 |
+
* 输入参数 `return_text`:是否返回纯文本;取值为 `False` 时返回带有结构化信息的 list;默认值为 `True`;
|
| 429 |
+
* 输入参数 `rec_config`:识别配置,可选值;
|
| 430 |
+
* 输入参数 `kwargs`:其他参数,目前未使用。
|
| 431 |
+
|
| 432 |
+
**返回值**:当 `return_text` 为 `True` 时,返回一个字符串;当 `return_text` 为 `False` 时,返回一个有序的(从上到下,从左到右)字典列表,每个字典表示一个检测框,包含以下键值:
|
| 433 |
+
- `text`:识别的 LaTeX 文本
|
| 434 |
+
- `score`:置信度分数 [0, 1];分数越高,置信度越高
|
| 435 |
+
|
| 436 |
+
**调用示例**:
|
| 437 |
+
|
| 438 |
+
```python
|
| 439 |
+
from pix2text import Pix2Text
|
| 440 |
+
|
| 441 |
+
img_fp = 'examples/formula2.png'
|
| 442 |
+
p2t = Pix2Text.from_config()
|
| 443 |
+
out = p2t.recognize_formula(
|
| 444 |
+
img_fp,
|
| 445 |
+
save_analysis_res=f'./output-debug',
|
| 446 |
+
)
|
| 447 |
+
```
|
| 448 |
+
|
| 449 |
+
### 5. 函数 `.recognize_text()`
|
| 450 |
+
|
| 451 |
+
此函数用于识别一张纯文字的图片中的内容,如示例图片 [examples/general.jpg](examples/general.jpg)。
|
| 452 |
+
函数定义如下:
|
| 453 |
+
|
| 454 |
+
```python
|
| 455 |
+
def recognize_text(
|
| 456 |
+
self,
|
| 457 |
+
imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
|
| 458 |
+
return_text: bool = True,
|
| 459 |
+
rec_config: Optional[dict] = None,
|
| 460 |
+
**kwargs,
|
| 461 |
+
) -> Union[str, List[str], List[Any], List[List[Any]]]:
|
| 462 |
+
"""
|
| 463 |
+
Recognize a pure Text Image.
|
| 464 |
+
Args:
|
| 465 |
+
imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
|
| 466 |
+
return_text (bool): Whether to return only the recognized text; default value is `True`
|
| 467 |
+
rec_config (Optional[dict]): The config for recognition
|
| 468 |
+
kwargs (): Other parameters for `text_ocr.ocr()`
|
| 469 |
+
|
| 470 |
+
Returns: Text str or list of text strs when `return_text` is True;
|
| 471 |
+
`List[Any]` or `List[List[Any]]` when `return_text` is False, with the same length as `imgs` and the following keys:
|
| 472 |
+
|
| 473 |
+
* `position`: Position information of the block, `np.ndarray`, with a shape of [4, 2]
|
| 474 |
+
* `text`: The recognized text
|
| 475 |
+
* `score`: The confidence score [0, 1]; the higher, the more confident
|
| 476 |
+
|
| 477 |
+
"""
|
| 478 |
+
```
|
| 479 |
+
|
| 480 |
+
**函数说明**:
|
| 481 |
+
|
| 482 |
+
* 输入参数 `imgs`:图片路径或者 `Image.Image` 对象,或者图片路径或者 `Image.Image` 对象的列表;
|
| 483 |
+
* 输入参数 `return_text`:是否返回纯文本;取值为 `False` 时返回带有结构化信息的 list;默认值为 `True`;
|
| 484 |
+
* 输入参数 `rec_config`:识别配置,可选值;
|
| 485 |
+
* 输入参数 `kwargs`:其他参数,具体说明参考函数 `text_ocr.ocr()`。
|
| 486 |
+
|
| 487 |
+
**返回值**:当 `return_text` 为 `True` 时,返回一个字符串;当 `return_text` 为 `False` 时,返回一个有序的(从上到下,从左到右)字典列表,每个字典表示一个检测框,包含以下键值:
|
| 488 |
+
- `position`:块的位置信息,`np.ndarray`,形状为 `[4, 2]`
|
| 489 |
+
- `text`:识别的文本
|
| 490 |
+
- `score`:置信度分数 [0, 1];分数越高,置信度越高
|
| 491 |
+
|
| 492 |
+
**调用示例**:
|
| 493 |
+
|
| 494 |
+
```python
|
| 495 |
+
from pix2text import Pix2Text
|
| 496 |
+
|
| 497 |
+
img_fp = 'examples/general.jpg'
|
| 498 |
+
p2t = Pix2Text.from_config()
|
| 499 |
+
out = p2t.recognize_text(img_fp)
|
| 500 |
+
```
|
| 501 |
+
|
| 502 |
+
### 6. 函数 `.recognize()`
|
| 503 |
+
|
| 504 |
+
是不是觉得上面的接口太丰富了,使用起来有点麻烦?没关系,这个函数可以根据指定的图片类型调用上面的不同函数进行识别。
|
| 505 |
+
|
| 506 |
+
```python
|
| 507 |
+
def recognize(
|
| 508 |
+
self,
|
| 509 |
+
img: Union[str, Path, Image.Image],
|
| 510 |
+
file_type: Literal[
|
| 511 |
+
'pdf', 'page', 'text_formula', 'formula', 'text'
|
| 512 |
+
] = 'text_formula',
|
| 513 |
+
**kwargs,
|
| 514 |
+
) -> Union[Document, Page, str, List[str], List[Any], List[List[Any]]]:
|
| 515 |
+
"""
|
| 516 |
+
Recognize the content of the image or pdf file according to the specified type.
|
| 517 |
+
It will call the corresponding recognition function `.recognize_{file_type}()` according to the `file_type`.
|
| 518 |
+
Args:
|
| 519 |
+
img (Union[str, Path, Image.Image]): The image/pdf file path or `Image.Image` object
|
| 520 |
+
file_type (str): Supported image types: 'pdf', 'page', 'text_formula', 'formula', 'text'
|
| 521 |
+
**kwargs (dict): Arguments for the corresponding recognition function
|
| 522 |
+
|
| 523 |
+
Returns: recognized results
|
| 524 |
+
|
| 525 |
+
"""
|
| 526 |
+
```
|
| 527 |
+
|
| 528 |
+
**函数说明**:
|
| 529 |
+
|
| 530 |
+
* 输入参数 `img`:图片/PDF文件路径或者 `Image.Image` 对象;
|
| 531 |
+
* 输入参数 `file_type`:图片类型,可选值为 `'pdf'`, `'page'`, `'text_formula'`, `'formula'`, `'text'`;
|
| 532 |
+
* 输入参数 `kwargs`:其他参数,具体说明参考上面的函数。
|
| 533 |
+
|
| 534 |
+
**返回值**:根据 `file_type` 的不同,返回不同的结果。具体说明参考上面的函数。
|
| 535 |
+
|
| 536 |
+
**调用示例**:
|
| 537 |
+
|
| 538 |
+
```python
|
| 539 |
+
from pix2text import Pix2Text
|
| 540 |
+
|
| 541 |
+
img_fp = 'examples/general.jpg'
|
| 542 |
+
p2t = Pix2Text.from_config()
|
| 543 |
+
out = p2t.recognize(img_fp, file_type='text') # 等价于 p2t.recognize_text(img_fp)
|
| 544 |
+
```
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
更多使用示例请参见 [tests/test_pix2text.py](https://github.com/breezedeus/Pix2Text/blob/main/tests/test_pix2text.py)。
|
mkdocs.yml
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Project information
|
| 2 |
+
site_name: Pix2Text
|
| 3 |
+
site_url: https://pix2text.readthedocs.io
|
| 4 |
+
site_description: Pix2Text Online Documents
|
| 5 |
+
site_author: Breezedeus
|
| 6 |
+
|
| 7 |
+
# Repository
|
| 8 |
+
repo_url: https://github.com/breezedeus/pix2text
|
| 9 |
+
repo_name: Breezedeus/Pix2Text
|
| 10 |
+
edit_uri: "" #disables edit button
|
| 11 |
+
|
| 12 |
+
# Copyright
|
| 13 |
+
copyright: Copyright © 2022 - 2024
|
| 14 |
+
|
| 15 |
+
# Social media
|
| 16 |
+
extra:
|
| 17 |
+
social:
|
| 18 |
+
- icon: fontawesome/brands/github
|
| 19 |
+
link: https://github.com/breezedeus
|
| 20 |
+
- icon: fontawesome/brands/zhihu
|
| 21 |
+
link: https://www.zhihu.com/people/breezedeus-50
|
| 22 |
+
- icon: fontawesome/brands/youtube
|
| 23 |
+
link: https://www.youtube.com/@breezedeus
|
| 24 |
+
- icon: fontawesome/brands/youtube
|
| 25 |
+
link: https://space.bilibili.com/509307267
|
| 26 |
+
- icon: fontawesome/brands/twitter
|
| 27 |
+
link: https://twitter.com/breezedeus
|
| 28 |
+
|
| 29 |
+
# Configuration
|
| 30 |
+
theme:
|
| 31 |
+
name: material
|
| 32 |
+
# name: readthedocs
|
| 33 |
+
logo: figs/breezedeus.png
|
| 34 |
+
favicon: figs/breezedeus.ico
|
| 35 |
+
palette:
|
| 36 |
+
primary: indigo
|
| 37 |
+
accent: indigo
|
| 38 |
+
font:
|
| 39 |
+
text: Roboto
|
| 40 |
+
code: Roboto Mono
|
| 41 |
+
features:
|
| 42 |
+
- navigation.tabs
|
| 43 |
+
- navigation.expand
|
| 44 |
+
icon:
|
| 45 |
+
repo: fontawesome/brands/github
|
| 46 |
+
|
| 47 |
+
# Extensions
|
| 48 |
+
markdown_extensions:
|
| 49 |
+
- meta
|
| 50 |
+
- pymdownx.emoji:
|
| 51 |
+
emoji_index: !!python/name:materialx.emoji.twemoji
|
| 52 |
+
emoji_generator: !!python/name:materialx.emoji.to_svg
|
| 53 |
+
- admonition # alerts
|
| 54 |
+
- pymdownx.details # collapsible alerts
|
| 55 |
+
- pymdownx.superfences # nest code and content inside alerts
|
| 56 |
+
- attr_list # add HTML and CSS to Markdown elements
|
| 57 |
+
- md_in_html
|
| 58 |
+
- pymdownx.inlinehilite # inline code highlights
|
| 59 |
+
- pymdownx.keys # show keystroke symbols
|
| 60 |
+
- pymdownx.snippets # insert content from other files
|
| 61 |
+
- pymdownx.tabbed # content tabs
|
| 62 |
+
- footnotes
|
| 63 |
+
- def_list
|
| 64 |
+
- pymdownx.arithmatex: # mathjax
|
| 65 |
+
generic: true
|
| 66 |
+
- pymdownx.tasklist:
|
| 67 |
+
custom_checkbox: true
|
| 68 |
+
clickable_checkbox: false
|
| 69 |
+
- codehilite
|
| 70 |
+
- pymdownx.highlight:
|
| 71 |
+
use_pygments: true
|
| 72 |
+
- toc:
|
| 73 |
+
toc_depth: 4
|
| 74 |
+
|
| 75 |
+
# Plugins
|
| 76 |
+
plugins:
|
| 77 |
+
- search
|
| 78 |
+
- macros
|
| 79 |
+
- mkdocstrings:
|
| 80 |
+
default_handler: python
|
| 81 |
+
handlers:
|
| 82 |
+
python:
|
| 83 |
+
rendering:
|
| 84 |
+
show_root_heading: false
|
| 85 |
+
show_source: true
|
| 86 |
+
show_category_heading: true
|
| 87 |
+
watch:
|
| 88 |
+
- pix2text
|
| 89 |
+
|
| 90 |
+
# Extra CSS
|
| 91 |
+
extra_css:
|
| 92 |
+
- static/css/custom.css
|
| 93 |
+
|
| 94 |
+
# Extra JS
|
| 95 |
+
extra_javascript:
|
| 96 |
+
- https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js
|
| 97 |
+
- https://polyfill.io/v3/polyfill.min.js?features=es6
|
| 98 |
+
- https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
|
| 99 |
+
|
| 100 |
+
# Page tree
|
| 101 |
+
nav:
|
| 102 |
+
- 🏠 Home: index.md
|
| 103 |
+
- 🛠️ Install: install.md
|
| 104 |
+
- 🛀🏻 Demo: demo.md
|
| 105 |
+
- 🧳 Models: models.md
|
| 106 |
+
- 📚 Examples: examples.md
|
| 107 |
+
- 📖 Usage: usage.md
|
| 108 |
+
- 🎮 APIs:
|
| 109 |
+
- Pix2Text: pix2text/pix_to_text.md
|
| 110 |
+
- TextFormulaOCR: pix2text/text_formula_ocr.md
|
| 111 |
+
- LatexOCR: pix2text/latex_ocr.md
|
| 112 |
+
- TableOCR: pix2text/table_ocr.md
|
| 113 |
+
- 💬 Contact: contact.md
|
| 114 |
+
- 🎛️ More:
|
| 115 |
+
- 🏄🏻 ️Command Tools: command.md
|
| 116 |
+
- 🕹 Model Training: train.md
|
| 117 |
+
- 🗒 RELEASE Notes: RELEASE.md
|
| 118 |
+
- 🙋🏽 FAQ: faq.md
|
| 119 |
+
- 🥤 Buy Me Coffee: buymeacoffee.md
|
pix2text/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
|
| 3 |
+
# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
|
| 4 |
+
|
| 5 |
+
from .utils import read_img, set_logger, merge_line_texts
|
| 6 |
+
from .render import render_html
|
| 7 |
+
from .doc_xl_layout import DocXLayoutParser
|
| 8 |
+
# from .layoutlmv3 import LayoutLMv3LayoutParser
|
| 9 |
+
# from .doc_yolo_layout_parser import DocYoloLayoutParser
|
| 10 |
+
from .latex_ocr import LatexOCR
|
| 11 |
+
from .formula_detector import MathFormulaDetector
|
| 12 |
+
from .text_formula_ocr import TextFormulaOCR
|
| 13 |
+
from .table_ocr import TableOCR
|
| 14 |
+
from .pix_to_text import Pix2Text
|
pix2text/__version__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
# Copyright (C) 2022-2025, [Breezedeus](https://www.breezedeus.com).

# Single source of truth for the package version string.
__version__ = '1.1.4'
|
pix2text/app.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
# Copyright (C) 2022, [Breezedeus](https://github.com/breezedeus).
|
| 3 |
+
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import streamlit as st
|
| 6 |
+
|
| 7 |
+
from pix2text import set_logger, Pix2Text
|
| 8 |
+
|
| 9 |
+
# Module-level setup for the Streamlit demo: create the shared logger and
# switch the page to wide layout (Streamlit requires `set_page_config` to be
# called before any other st.* UI call).
logger = set_logger()
st.set_page_config(layout="wide")
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# NOTE(review): `st.cache(allow_output_mutation=True)` is deprecated (and
# removed in recent Streamlit releases) in favor of `st.cache_resource`;
# verify the pinned Streamlit version before migrating.
@st.cache(allow_output_mutation=True)
def get_model():
    """Create the Pix2Text model once and reuse it across Streamlit reruns."""
    return Pix2Text()
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def main():
    """Render the Streamlit demo page: upload an image, run Pix2Text, show results."""
    model = get_model()

    # Page header and author links (HTML allowed for centered styling).
    page_title = '开源工具 <a href="https://github.com/breezedeus/pix2text">Pix2Text</a> Demo'
    st.markdown(f"<h1 style='text-align: center;'>{page_title}</h1>", unsafe_allow_html=True)

    page_subtitle = (
        '作者:<a href="https://github.com/breezedeus">breezedeus</a>; '
        '欢迎加入 <a href="https://cnocr.readthedocs.io/zh-cn/stable/contact/">交流群</a>'
    )
    st.markdown(f"<div style='text-align: center;'>{page_subtitle}</div>", unsafe_allow_html=True)
    st.markdown('')

    # File uploader; halt the script run until the user supplies an image.
    st.subheader('选择待识别图片')
    uploaded = st.file_uploader('', type=["png", "jpg", "jpeg", "webp"])
    if uploaded is None:
        st.stop()

    try:
        # Normalize the upload to RGB and keep a local copy on disk.
        image = Image.open(uploaded).convert('RGB')
        image.save('ori.jpg')

        result = model(image)
        logger.info(result)

        st.markdown('##### 原始图片:')
        layout_cols = st.columns([1, 3, 1])
        with layout_cols[1]:
            st.image(uploaded)

        st.subheader('识别结果:')
        st.markdown(f"* **图片类型**:{result['image_type']}")
        st.markdown("* **识别内容**:")

        layout_cols = st.columns([1, 3, 1])
        with layout_cols[1]:
            st.text(result['text'])

        # Additionally render the LaTeX when the image is a pure formula.
        if result['image_type'] == 'formula':
            st.markdown(f"$${result['text']}$$")

    except Exception as e:  # surface failures in the UI instead of crashing the app
        st.error(e)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
if __name__ == '__main__':
|
| 61 |
+
main()
|
pix2text/cli.py
ADDED
|
@@ -0,0 +1,751 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
|
| 3 |
+
# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import logging
|
| 7 |
+
import glob
|
| 8 |
+
import json
|
| 9 |
+
from multiprocessing import Process
|
| 10 |
+
from pprint import pformat
|
| 11 |
+
|
| 12 |
+
import click
|
| 13 |
+
|
| 14 |
+
from pix2text import set_logger, Pix2Text
|
| 15 |
+
|
| 16 |
+
_CONTEXT_SETTINGS = {"help_option_names": ['-h', '--help']}
|
| 17 |
+
logger = set_logger(log_level=logging.INFO)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@click.group(context_settings=_CONTEXT_SETTINGS)
def cli():
    # Root group of the command-line interface; subcommands (e.g. `predict`,
    # `evaluate`) attach themselves via `@cli.command(...)`. Intentionally a
    # comment rather than a docstring: click would surface a docstring as the
    # group's help text.
    pass
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@cli.command('predict')
@click.option(
    "-l",
    "--languages",
    type=str,
    default='en,ch_sim',
    help="Language Codes for Text-OCR to recognize, separated by commas",
    show_default=True,
)
@click.option(
    "--layout-config",
    type=str,
    default=None,
    help="Configuration information for the layout parser model, in JSON string format. Default: `None`, meaning using the default configuration",
    show_default=True,
)
@click.option(
    "--mfd-config",
    type=str,
    default=None,
    help="Configuration information for the MFD model, in JSON string format. Default: `None`, meaning using the default configuration",
    show_default=True,
)
@click.option(
    "--formula-ocr-config",
    type=str,
    default=None,
    help="Configuration information for the Latex-OCR mathematical formula recognition model. Default: `None`, meaning using the default configuration",
    show_default=True,
)
@click.option(
    "--text-ocr-config",
    type=str,
    default=None,
    help="Configuration information for Text-OCR recognition, in JSON string format. Default: `None`, meaning using the default configuration",
    show_default=True,
)
@click.option(
    "--enable-formula/--disable-formula",
    default=True,
    help="Whether to enable formula recognition",
    show_default=True,
)
@click.option(
    "--enable-table/--disable-table",
    default=True,
    help="Whether to enable table recognition",
    show_default=True,
)
@click.option(
    "-d",
    "--device",
    help="Choose to run the code using `cpu`, `gpu`, or a specific GPU like `cuda:0`",
    type=str,
    default='cpu',
    show_default=True,
)
@click.option(
    "--file-type",
    type=click.Choice(['pdf', 'page', 'text_formula', 'formula', 'text']),
    default='text_formula',
    help="Which file type to process, 'pdf', 'page', 'text_formula', 'formula', or 'text'",
    show_default=True,
)
@click.option(
    "--resized-shape",
    help="Resize the image width to this size before processing",
    type=int,
    default=768,
    show_default=True,
)
@click.option(
    "-i",
    "--img-file-or-dir",
    required=True,
    help="File path of the input image/pdf or the specified directory",
)
@click.option(
    "--save-debug-res",
    default=None,
    help="If `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save",
    show_default=True,
)
@click.option(
    "--rec-kwargs",
    type=str,
    default=None,
    help="kwargs for calling .recognize(), in JSON string format",
    show_default=True,
)
@click.option(
    "--return-text/--no-return-text",
    default=True,
    help="Whether to return only the text result",
    show_default=True,
)
@click.option(
    "--auto-line-break/--no-auto-line-break",
    default=True,
    help="Whether to automatically determine to merge adjacent line results into a single line result",
    show_default=True,
)
@click.option(
    "-o",
    "--output-dir",
    default='output-md',
    help="Output directory for the recognized text results. Only effective when `file-type` is `pdf` or `page`",
    show_default=True,
)
@click.option(
    "--log-level",
    default='INFO',
    help="Log Level, such as `INFO`, `DEBUG`",
    show_default=True,
)
def predict(
    languages,
    layout_config,
    mfd_config,
    formula_ocr_config,
    text_ocr_config,
    enable_formula,
    enable_table,
    device,
    file_type,
    resized_shape,
    img_file_or_dir,
    save_debug_res,
    rec_kwargs,
    return_text,
    auto_line_break,
    output_dir,
    log_level,
):
    """Use Pix2Text (P2T) to predict the text information in an image or PDF."""
    logger = set_logger(log_level=log_level)

    # Parse JSON-string configs into dicts; an empty dict means "use defaults".
    mfd_config = json.loads(mfd_config) if mfd_config else {}
    formula_ocr_config = json.loads(formula_ocr_config) if formula_ocr_config else {}
    languages = [lang.strip() for lang in languages.split(',') if lang.strip()]
    text_ocr_config = json.loads(text_ocr_config) if text_ocr_config else {}

    layout_config = json.loads(layout_config) if layout_config else {}
    text_formula_config = {
        'languages': languages,  # e.g. ['en', 'ch_sim']
        'mfd': mfd_config,
        'formula': formula_ocr_config,
        'text': text_ocr_config,
    }
    total_config = {
        'layout': layout_config,
        'text_formula': text_formula_config,
    }
    p2t = Pix2Text.from_config(
        total_configs=total_config,
        enable_formula=enable_formula,
        enable_table=enable_table,
        device=device,
    )

    # Collect the input file(s); `save_debug_res` is turned into a per-file list.
    fp_list = []
    if os.path.isfile(img_file_or_dir):
        fp_list.append(img_file_or_dir)
        if save_debug_res:
            save_debug_res = [save_debug_res]
    elif os.path.isdir(img_file_or_dir):
        # FIX: `glob.glob1` is a private, undocumented helper that was removed
        # in Python 3.13; use the public `glob.glob` with the same '*g'
        # pattern (matches e.g. *.jpg, *.jpeg, *.png).
        fp_list = glob.glob(os.path.join(img_file_or_dir, '*g'))
        if save_debug_res:
            os.makedirs(save_debug_res, exist_ok=True)
            save_debug_res = [
                os.path.join(save_debug_res, 'output-debugs-' + os.path.basename(fp))
                for fp in fp_list
            ]
    else:
        raise ValueError(f'{img_file_or_dir} is not a valid file or directory')

    rec_kwargs = json.loads(rec_kwargs) if rec_kwargs else {}
    rec_kwargs['resized_shape'] = resized_shape
    rec_kwargs['return_text'] = return_text
    rec_kwargs['auto_line_break'] = auto_line_break

    for idx, fp in enumerate(fp_list):
        # Page-level recognition expects `save_debug_res`; image-level
        # recognition expects `save_analysis_res`.
        if file_type in ('pdf', 'page'):
            rec_kwargs['save_debug_res'] = (
                save_debug_res[idx] if save_debug_res is not None else None
            )
        else:
            rec_kwargs['save_analysis_res'] = (
                save_debug_res[idx] if save_debug_res is not None else None
            )
        out = p2t.recognize(fp, file_type=file_type, **rec_kwargs)
        if file_type in ('pdf', 'page'):
            # Page/PDF results are Page/Document objects; export to markdown.
            out = out.to_markdown(output_dir)
        logger.info(
            f'In image: {fp}\nOuts: \n{out if isinstance(out, str) else pformat(out)}\n'
        )
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@cli.command('evaluate')
|
| 224 |
+
@click.option(
|
| 225 |
+
"-l",
|
| 226 |
+
"--languages",
|
| 227 |
+
type=str,
|
| 228 |
+
default='en,ch_sim',
|
| 229 |
+
help="Language Codes for Text-OCR to recognize, separated by commas",
|
| 230 |
+
show_default=True,
|
| 231 |
+
)
|
| 232 |
+
@click.option(
|
| 233 |
+
"--layout-config",
|
| 234 |
+
type=str,
|
| 235 |
+
default=None,
|
| 236 |
+
help="Configuration information for the layout parser model, in JSON string format. Default: `None`, meaning using the default configuration",
|
| 237 |
+
show_default=True,
|
| 238 |
+
)
|
| 239 |
+
@click.option(
|
| 240 |
+
"--mfd-config",
|
| 241 |
+
type=str,
|
| 242 |
+
default=None,
|
| 243 |
+
help="Configuration information for the MFD model, in JSON string format. Default: `None`, meaning using the default configuration",
|
| 244 |
+
show_default=True,
|
| 245 |
+
)
|
| 246 |
+
@click.option(
|
| 247 |
+
"--formula-ocr-config",
|
| 248 |
+
type=str,
|
| 249 |
+
default=None,
|
| 250 |
+
help="Configuration information for the Latex-OCR mathematical formula recognition model. Default: `None`, meaning using the default configuration",
|
| 251 |
+
show_default=True,
|
| 252 |
+
)
|
| 253 |
+
@click.option(
|
| 254 |
+
"--text-ocr-config",
|
| 255 |
+
type=str,
|
| 256 |
+
default=None,
|
| 257 |
+
help="Configuration information for Text-OCR recognition, in JSON string format. Default: `None`, meaning using the default configuration",
|
| 258 |
+
show_default=True,
|
| 259 |
+
)
|
| 260 |
+
@click.option(
|
| 261 |
+
"--enable-formula/--disable-formula",
|
| 262 |
+
default=True,
|
| 263 |
+
help="Whether to enable formula recognition",
|
| 264 |
+
show_default=True,
|
| 265 |
+
)
|
| 266 |
+
@click.option(
|
| 267 |
+
"--enable-table/--disable-table",
|
| 268 |
+
default=True,
|
| 269 |
+
help="Whether to enable table recognition",
|
| 270 |
+
show_default=True,
|
| 271 |
+
)
|
| 272 |
+
@click.option(
|
| 273 |
+
"-d",
|
| 274 |
+
"--device",
|
| 275 |
+
help="Choose to run the code using `cpu`, `gpu`, or a specific GPU like `cuda:0`",
|
| 276 |
+
type=str,
|
| 277 |
+
default='cpu',
|
| 278 |
+
show_default=True,
|
| 279 |
+
)
|
| 280 |
+
@click.option(
|
| 281 |
+
"--file-type",
|
| 282 |
+
type=click.Choice(['pdf', 'page', 'text_formula', 'formula', 'text']),
|
| 283 |
+
default='text_formula',
|
| 284 |
+
help="Which file type to process, 'pdf', 'page', 'text_formula', 'formula', or 'text'",
|
| 285 |
+
show_default=True,
|
| 286 |
+
)
|
| 287 |
+
@click.option(
|
| 288 |
+
"--resized-shape",
|
| 289 |
+
help="Resize the image width to this size before processing",
|
| 290 |
+
type=int,
|
| 291 |
+
default=768,
|
| 292 |
+
show_default=True,
|
| 293 |
+
)
|
| 294 |
+
@click.option(
|
| 295 |
+
"-i",
|
| 296 |
+
"--input-json",
|
| 297 |
+
required=True,
|
| 298 |
+
help="JSON file containing evaluation data with image paths and ground truth",
|
| 299 |
+
)
|
| 300 |
+
@click.option(
|
| 301 |
+
"--gt-key",
|
| 302 |
+
default="model_result",
|
| 303 |
+
help="Key name for ground truth text in the JSON data",
|
| 304 |
+
show_default=True,
|
| 305 |
+
)
|
| 306 |
+
@click.option(
|
| 307 |
+
"--prefix-img-dir",
|
| 308 |
+
default="data",
|
| 309 |
+
help="Root directory for image files, will be prepended to img_path in JSON",
|
| 310 |
+
show_default=True,
|
| 311 |
+
)
|
| 312 |
+
@click.option(
|
| 313 |
+
"--rec-kwargs",
|
| 314 |
+
type=str,
|
| 315 |
+
default=None,
|
| 316 |
+
help="kwargs for calling .recognize(), in JSON string format",
|
| 317 |
+
show_default=True,
|
| 318 |
+
)
|
| 319 |
+
@click.option(
|
| 320 |
+
"--auto-line-break/--no-auto-line-break",
|
| 321 |
+
default=True,
|
| 322 |
+
help="Whether to automatically determine to merge adjacent line results into a single line result",
|
| 323 |
+
show_default=True,
|
| 324 |
+
)
|
| 325 |
+
@click.option(
|
| 326 |
+
"-o",
|
| 327 |
+
"--output-json",
|
| 328 |
+
default='evaluation_results.json',
|
| 329 |
+
help="Output JSON file for evaluation results",
|
| 330 |
+
show_default=True,
|
| 331 |
+
)
|
| 332 |
+
@click.option(
|
| 333 |
+
"--output-excel",
|
| 334 |
+
default=None,
|
| 335 |
+
help="Output Excel file with embedded images (optional)",
|
| 336 |
+
show_default=True,
|
| 337 |
+
)
|
| 338 |
+
@click.option(
|
| 339 |
+
"--output-html",
|
| 340 |
+
default=None,
|
| 341 |
+
help="Output HTML report with embedded images (optional)",
|
| 342 |
+
show_default=True,
|
| 343 |
+
)
|
| 344 |
+
@click.option(
|
| 345 |
+
"--max-img-width",
|
| 346 |
+
default=400,
|
| 347 |
+
help="Maximum width for embedded images in pixels",
|
| 348 |
+
show_default=True,
|
| 349 |
+
)
|
| 350 |
+
@click.option(
|
| 351 |
+
"--max-img-height",
|
| 352 |
+
default=300,
|
| 353 |
+
help="Maximum height for embedded images in pixels",
|
| 354 |
+
show_default=True,
|
| 355 |
+
)
|
| 356 |
+
@click.option(
|
| 357 |
+
"--max-samples",
|
| 358 |
+
default=-1,
|
| 359 |
+
help="Maximum number of samples to process (-1 for all samples)",
|
| 360 |
+
show_default=True,
|
| 361 |
+
)
|
| 362 |
+
@click.option(
|
| 363 |
+
"--log-level",
|
| 364 |
+
default='INFO',
|
| 365 |
+
help="Log Level, such as `INFO`, `DEBUG`",
|
| 366 |
+
show_default=True,
|
| 367 |
+
)
|
| 368 |
+
def evaluate(
    languages,
    layout_config,
    mfd_config,
    formula_ocr_config,
    text_ocr_config,
    enable_formula,
    enable_table,
    device,
    file_type,
    resized_shape,
    input_json,
    gt_key,
    prefix_img_dir,
    rec_kwargs,
    auto_line_break,
    output_json,
    output_excel,
    output_html,
    max_img_width,
    max_img_height,
    max_samples,
    log_level,
):
    """Evaluate Pix2Text (P2T) performance using a JSON file with image paths and ground truth.

    The input JSON must be a list of dicts, each with an 'img_path' entry and a
    ground-truth entry under ``gt_key``. Every kept sample is recognized, its
    Character Error Rate (CER) is computed, and the aggregated results are
    written to ``output_json`` (and optionally to an Excel file / HTML report
    with embedded images).
    """
    from pix2text.utils import (
        calculate_cer_batch,
        calculate_cer,
        save_evaluation_results_to_excel_with_images,
        create_html_report_with_images
    )

    logger = set_logger(log_level=log_level)

    # Load evaluation data
    try:
        with open(input_json, 'r', encoding='utf-8') as f:
            eval_data = json.load(f)
    except Exception as e:
        logger.error(f"Failed to load evaluation data from {input_json}: {e}")
        return

    if not isinstance(eval_data, list):
        logger.error("Evaluation data must be a list of dictionaries")
        return

    # Validate the data format up front so we fail before loading any models
    for i, item in enumerate(eval_data):
        if not isinstance(item, dict):
            logger.error(f"Item {i} is not a dictionary")
            return
        if 'img_path' not in item or gt_key not in item:
            logger.error(f"Item {i} missing required keys 'img_path' or '{gt_key}'")
            return

    # Initialize Pix2Text; each CLI option holding a JSON string becomes a dict
    mfd_config = json.loads(mfd_config) if mfd_config else {}
    formula_ocr_config = json.loads(formula_ocr_config) if formula_ocr_config else {}
    languages = [lang.strip() for lang in languages.split(',') if lang.strip()]
    text_ocr_config = json.loads(text_ocr_config) if text_ocr_config else {}

    layout_config = json.loads(layout_config) if layout_config else {}
    text_formula_config = {
        'languages': languages,
        'mfd': mfd_config,
        'formula': formula_ocr_config,
        'text': text_ocr_config,
    }
    total_config = {
        'layout': layout_config,
        'text_formula': text_formula_config,
    }
    p2t = Pix2Text.from_config(
        total_configs=total_config,
        enable_formula=enable_formula,
        enable_table=enable_table,
        device=device,
    )

    # Prepare recognition kwargs; plain-text output is required for CER scoring
    rec_kwargs = json.loads(rec_kwargs) if rec_kwargs else {}
    rec_kwargs['resized_shape'] = resized_shape
    rec_kwargs['return_text'] = True
    rec_kwargs['auto_line_break'] = auto_line_break

    def filter_and_clean_gt(gt):
        # Only evaluate on a subset of the images: strip a single wrapping
        # '$$ ... $$' pair and keep the sample only if no '$$' remains.
        # Returns (keep, cleaned_gt).
        if not gt or not isinstance(gt, str):
            # Guard: json.loads above may have produced a non-string value.
            return False, gt
        if gt.startswith(r'$$') and gt.endswith(r'$$'):
            gt = gt[2:-2]
        if '$$' not in gt:
            return True, gt.strip()
        return False, gt

    # Process each image and collect results
    predictions = []
    ground_truths = []
    results = []

    # When limiting samples, shuffle deterministically so the subset is unbiased
    if max_samples > 0:
        import random
        random.seed(42)
        random.shuffle(eval_data)

        logger.info(f"Limited to {max_samples} samples for evaluation")
    logger.info(f"Starting evaluation on {len(eval_data)} images...")

    for i, item in enumerate(eval_data):
        # FIX: the original tested `len(results) >= max_samples`, which is
        # always true for the default max_samples == -1 (0 >= -1) and aborted
        # the loop before processing a single sample.
        if 0 < max_samples <= len(results):
            break
        # FIX: use 'img_path' — the key validated above and documented in the
        # --input-json help. The original read 'new_img_path', which is never
        # validated and raises KeyError on well-formed input.
        img_path = item['img_path']
        ground_truth = item[gt_key]

        # Handle ground truth that might be a JSON string
        if isinstance(ground_truth, str):
            try:
                ground_truth = json.loads(ground_truth)
            except json.JSONDecodeError:
                # If it's not valid JSON, use as is
                pass

        # Apply formula filtering if needed
        is_formula, ground_truth = filter_and_clean_gt(ground_truth)
        if not is_formula:
            continue

        # Prepend prefix_img_dir to img_path if it's not an absolute path
        if not os.path.isabs(img_path):
            img_path = os.path.join(prefix_img_dir, img_path)

        logger.info(f"Processing image {i+1}/{len(eval_data)}: {img_path}")

        try:
            # Check if image file exists
            if not os.path.exists(img_path):
                logger.warning(f"Image file not found: {img_path}")
                continue

            # Recognize text
            prediction = p2t.recognize(img_path, file_type=file_type, **rec_kwargs)

            # Convert to string if needed (pdf/page recognition returns a Page object)
            if not isinstance(prediction, str):
                if hasattr(prediction, 'to_markdown'):
                    prediction = prediction.to_markdown()
                else:
                    prediction = str(prediction)

            predictions.append(prediction)
            ground_truths.append(ground_truth)

            # Calculate individual CER
            cer = calculate_cer(prediction, ground_truth)

            result = {
                'img_path': img_path,
                'ground_truth': ground_truth,
                'prediction': prediction,
                'cer': cer,
            }
            results.append(result)

            logger.info(f"Image {img_path} CER: {cer:.4f}")

        except Exception as e:
            # Best-effort evaluation: a single bad sample must not kill the run.
            logger.error(f"Error processing image {img_path}: {e}")
            continue

    # Calculate overall CER
    if predictions and ground_truths:
        cer_stats = calculate_cer_batch(predictions, ground_truths)

        # Prepare final results
        evaluation_results = {
            'summary': {
                'total_samples': len(results),
                'average_cer': cer_stats['average_cer'],
                'individual_cers': cer_stats['individual_cers'],
            },
            'detailed_results': results,
        }

        # Save results
        try:
            with open(output_json, 'w', encoding='utf-8') as f:
                json.dump(evaluation_results, f, ensure_ascii=False, indent=2)
            logger.info(f"Evaluation results saved to: {output_json}")
        except Exception as e:
            logger.error(f"Failed to save evaluation results: {e}")

        # Print summary
        logger.info("=" * 50)
        logger.info("EVALUATION SUMMARY")
        logger.info("=" * 50)
        logger.info(f"Total samples processed: {len(results)}")
        logger.info(f"Average CER: {cer_stats['average_cer']:.4f}")
        logger.info(f"Best CER: {min(cer_stats['individual_cers']):.4f}")
        logger.info(f"Worst CER: {max(cer_stats['individual_cers']):.4f}")
        logger.info("=" * 50)

    else:
        logger.error("No valid predictions generated")

    # Save results to Excel with embedded images (if requested)
    if output_excel and results:
        excel_success = save_evaluation_results_to_excel_with_images(
            results=results,
            output_file=output_excel,
            img_path_key='img_path',
            gt_key='ground_truth',
            pred_key='prediction',
            cer_key='cer',
            max_img_width=max_img_width,
            max_img_height=max_img_height,
        )
        if excel_success:
            logger.info(f"Excel file with embedded images saved to: {output_excel}")
        else:
            logger.warning("Failed to save Excel file with embedded images")

    # Save results to HTML report with embedded images (if requested)
    if output_html and results:
        html_success = create_html_report_with_images(
            results=results,
            output_file=output_html,
            img_path_key='img_path',
            gt_key='ground_truth',
            pred_key='prediction',
            cer_key='cer',
            max_img_width=max_img_width,
            max_img_height=max_img_height,
        )
        if html_success:
            logger.info(f"HTML report with embedded images saved to: {output_html}")
        else:
            logger.warning("Failed to save HTML report with embedded images")
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
@cli.command('serve')
|
| 613 |
+
@click.option(
|
| 614 |
+
"-l",
|
| 615 |
+
"--languages",
|
| 616 |
+
type=str,
|
| 617 |
+
default='en,ch_sim',
|
| 618 |
+
help="Language Codes for Text-OCR to recognize, separated by commas",
|
| 619 |
+
show_default=True,
|
| 620 |
+
)
|
| 621 |
+
@click.option(
|
| 622 |
+
"--layout-config",
|
| 623 |
+
type=str,
|
| 624 |
+
default=None,
|
| 625 |
+
help="Configuration information for the layout parser model, in JSON string format. Default: `None`, meaning using the default configuration",
|
| 626 |
+
show_default=True,
|
| 627 |
+
)
|
| 628 |
+
@click.option(
|
| 629 |
+
"--mfd-config",
|
| 630 |
+
type=str,
|
| 631 |
+
default=None,
|
| 632 |
+
help="Configuration information for the MFD model, in JSON string format. Default: `None`, meaning using the default configuration",
|
| 633 |
+
show_default=True,
|
| 634 |
+
)
|
| 635 |
+
@click.option(
|
| 636 |
+
"--formula-ocr-config",
|
| 637 |
+
type=str,
|
| 638 |
+
default=None,
|
| 639 |
+
help="Configuration information for the Latex-OCR mathematical formula recognition model. Default: `None`, meaning using the default configuration",
|
| 640 |
+
show_default=True,
|
| 641 |
+
)
|
| 642 |
+
@click.option(
|
| 643 |
+
"--text-ocr-config",
|
| 644 |
+
type=str,
|
| 645 |
+
default=None,
|
| 646 |
+
help="Configuration information for Text-OCR recognition, in JSON string format. Default: `None`, meaning using the default configuration",
|
| 647 |
+
show_default=True,
|
| 648 |
+
)
|
| 649 |
+
@click.option(
|
| 650 |
+
"--enable-formula/--disable-formula",
|
| 651 |
+
default=True,
|
| 652 |
+
help="Whether to enable formula recognition",
|
| 653 |
+
show_default=True,
|
| 654 |
+
)
|
| 655 |
+
@click.option(
|
| 656 |
+
"--enable-table/--disable-table",
|
| 657 |
+
default=True,
|
| 658 |
+
help="Whether to enable table recognition",
|
| 659 |
+
show_default=True,
|
| 660 |
+
)
|
| 661 |
+
@click.option(
|
| 662 |
+
"-d",
|
| 663 |
+
"--device",
|
| 664 |
+
help="Choose to run the code using `cpu`, `gpu`, or a specific GPU like `cuda:0`",
|
| 665 |
+
type=str,
|
| 666 |
+
default='cpu',
|
| 667 |
+
show_default=True,
|
| 668 |
+
)
|
| 669 |
+
@click.option(
|
| 670 |
+
"-o",
|
| 671 |
+
"--output-md-root-dir",
|
| 672 |
+
default='output-md-root',
|
| 673 |
+
help="Markdown output root directory for the recognized text results. Only effective when `file-type` is `pdf` or `page`",
|
| 674 |
+
show_default=True,
|
| 675 |
+
)
|
| 676 |
+
@click.option(
|
| 677 |
+
'-H', '--host', type=str, default='0.0.0.0', help='server host', show_default=True,
|
| 678 |
+
)
|
| 679 |
+
@click.option(
|
| 680 |
+
'-p', '--port', type=int, default=8503, help='server port', show_default=True,
|
| 681 |
+
)
|
| 682 |
+
@click.option(
|
| 683 |
+
'--reload',
|
| 684 |
+
is_flag=True,
|
| 685 |
+
help='whether to reload the server when the codes have been changed',
|
| 686 |
+
show_default=True,
|
| 687 |
+
)
|
| 688 |
+
@click.option(
|
| 689 |
+
"--log-level",
|
| 690 |
+
default='INFO',
|
| 691 |
+
help="Log Level, such as `INFO`, `DEBUG`",
|
| 692 |
+
show_default=True,
|
| 693 |
+
)
|
| 694 |
+
def serve(
    languages,
    layout_config,
    mfd_config,
    formula_ocr_config,
    text_ocr_config,
    enable_formula,
    enable_table,
    device,
    output_md_root_dir,
    host,
    port,
    reload,
    log_level,
):
    """Start the HTTP service."""
    from pix2text.serve import start_server

    logger = set_logger(log_level=log_level)

    # Decode every JSON-string CLI option; `None`/empty means default config.
    mfd_cfg = json.loads(mfd_config) if mfd_config else {}
    formula_cfg = json.loads(formula_ocr_config) if formula_ocr_config else {}
    text_cfg = json.loads(text_ocr_config) if text_ocr_config else {}
    layout_cfg = json.loads(layout_config) if layout_config else {}
    # 'en,ch_sim' -> ['en', 'ch_sim'], dropping empty fragments.
    lang_list = [code.strip() for code in languages.split(',') if code.strip()]

    # Assemble the nested configuration expected by Pix2Text.from_config().
    total_config = {
        'layout': layout_cfg,
        'text_formula': {
            'languages': lang_list,
            'mfd': mfd_cfg,
            'formula': formula_cfg,
            'text': text_cfg,
        },
    }
    p2t_config = dict(
        total_configs=total_config,
        enable_formula=enable_formula,
        enable_table=enable_table,
        device=device,
    )

    # Run the HTTP server in a child process and block until it exits.
    server_proc = Process(
        target=start_server,
        kwargs={
            'p2t_config': p2t_config,
            'output_md_root_dir': output_md_root_dir,
            'host': host,
            'port': port,
            'reload': reload,
        },
    )
    server_proc.start()
    server_proc.join()
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
if __name__ == "__main__":
|
| 751 |
+
cli()
|
pix2text/consts.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
|
| 3 |
+
# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
|
| 4 |
+
import os
|
| 5 |
+
import logging
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
from copy import copy, deepcopy
|
| 8 |
+
from typing import Set, Tuple, Dict, Any, Optional
|
| 9 |
+
|
| 10 |
+
from .__version__ import __version__
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)

# The model version tracks only the first two segments of the package version;
# a change in the third segment means the models remain compatible.
# E.g. __version__ = '1.0.*' always maps to MODEL_VERSION '1.0'.
MODEL_VERSION = '.'.join(__version__.split('.', maxsplit=2)[:2])
# Where model files are downloaded from; defaults to 'HF' (Hugging Face)
# unless overridden via the PIX2TEXT_DOWNLOAD_SOURCE environment variable.
DOWNLOAD_SOURCE = os.environ.get('PIX2TEXT_DOWNLOAD_SOURCE', 'HF')

# Version-specific Aliyun OSS prefix used as an in-China download mirror.
CN_OSS_ENDPOINT = (
    "https://sg-models.oss-cn-beijing.aliyuncs.com/pix2text/%s/" % MODEL_VERSION
)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def format_model_info(info: dict) -> dict:
    """Return a shallow copy of ``info`` with the CN OSS endpoint attached.

    The input dict is left untouched; the copy gains a 'cn_oss' key pointing
    at the version-specific Aliyun OSS download prefix.
    """
    out = copy(info)
    out.update(cn_oss=CN_OSS_ENDPOINT)
    return out
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class AvailableModels(object):
    """Registry of the MFD/MFR models that Pix2Text knows how to obtain.

    Models are keyed by ``(model_name, model_backend)`` tuples, where the
    backend is 'onnx' or 'pytorch'. Built-in models live in the Pix2Text
    namespace; external packages may add their own via
    :meth:`register_models`.
    """

    # Namespace used for models shipped by Pix2Text itself.
    P2T_SPACE = '__pix2text__'

    # Freely downloadable models.
    FREE_MODELS = OrderedDict(
        {
            ('mfr', 'onnx'): {
                'filename': 'p2t-mfr-onnx.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr',
                'local_model_id': 'mfr-onnx',
            },
            ('mfd', 'onnx'): {
                'filename': 'p2t-mfd-onnx.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd',
                'local_model_id': 'mfd-onnx',
            },
            # NOTE: 1.5-series models have no 'filename' entry, i.e. no CN OSS
            # package; presumably they are fetched from Hugging Face only.
            ('mfd-1.5', 'onnx'): {
                # 'filename': 'p2t-mfd-onnx.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-1.5',
                'local_model_id': 'mfd-1.5-onnx',
            },
            ('mfr-1.5', 'onnx'): {
                # 'filename': 'p2t-mfr-onnx.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-1.5',
                'local_model_id': 'mfr-1.5-onnx',
            },
        }
    )

    # Paid (licensed) models; same entry layout as FREE_MODELS.
    PAID_MODELS = OrderedDict(
        {
            ('mfr', 'pytorch'): {
                'filename': 'p2t-mfr-pytorch.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-pytorch',
                'local_model_id': 'mfr-pytorch',
            },
            ('mfr-pro', 'onnx'): {
                'filename': 'p2t-mfr-pro-onnx.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-pro',
                'local_model_id': 'mfr-pro-onnx',
            },
            ('mfr-pro', 'pytorch'): {
                'filename': 'p2t-mfr-pro-pytorch.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-pro-pytorch',
                'local_model_id': 'mfr-pro-pytorch',
            },
            ('mfr-plus', 'onnx'): {
                'filename': 'p2t-mfr-plus-onnx.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-plus',
                'local_model_id': 'mfr-plus-onnx',
            },
            ('mfr-plus', 'pytorch'): {
                'filename': 'p2t-mfr-plus-pytorch.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-plus-pytorch',
                'local_model_id': 'mfr-plus-pytorch',
            },
            ('mfd', 'pytorch'): {
                'filename': 'p2t-mfd-pytorch.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-pytorch',
                'local_model_id': 'mfd-pytorch',
            },
            ('mfd-advanced', 'onnx'): {
                'filename': 'p2t-mfd-advanced-onnx.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-advanced',
                'local_model_id': 'mfd-advanced-onnx',
            },
            ('mfd-advanced', 'pytorch'): {
                'filename': 'p2t-mfd-advanced-pytorch.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-advanced-pytorch',
                'local_model_id': 'mfd-advanced-pytorch',
            },
            ('mfd-pro', 'onnx'): {
                'filename': 'p2t-mfd-pro-onnx.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-pro',
                'local_model_id': 'mfd-pro-onnx',
            },
            ('mfd-pro', 'pytorch'): {
                'filename': 'p2t-mfd-pro-pytorch.zip',  # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-pro-pytorch',
                'local_model_id': 'mfd-pro-pytorch',
            },
            ('mfd-1.5', 'pytorch'): {
                # 'filename': 'p2t-mfd-1.5-pytorch.zip',
                'hf_model_id': 'breezedeus/pix2text-mfd-1.5-pytorch',
                'local_model_id': 'mfd-1.5-pytorch',
            },
            ('mfd-advanced-1.5', 'onnx'): {
                # 'filename': 'p2t-mfd-advanced-onnx.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-advanced-1.5',
                'local_model_id': 'mfd-advanced-1.5-onnx',
            },
            ('mfd-advanced-1.5', 'pytorch'): {
                # 'filename': 'p2t-mfd-advanced-pytorch.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-advanced-1.5-pytorch',
                'local_model_id': 'mfd-advanced-1.5-pytorch',
            },
            ('mfd-pro-1.5', 'onnx'): {
                # 'filename': 'p2t-mfd-pro-onnx.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-pro-1.5',
                'local_model_id': 'mfd-pro-1.5-onnx',
            },
            ('mfd-pro-1.5', 'pytorch'): {
                # 'filename': 'p2t-mfd-pro-pytorch.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfd-pro-1.5-pytorch',
                'local_model_id': 'mfd-pro-1.5-pytorch',
            },
            ('mfr-1.5', 'pytorch'): {
                # 'filename': 'p2t-mfr-pytorch.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-1.5-pytorch',
                'local_model_id': 'mfr-1.5-pytorch',
            },
            ('mfr-pro-1.5', 'onnx'): {
                # 'filename': 'p2t-mfr-pro-onnx.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-pro-1.5',
                'local_model_id': 'mfr-pro-1.5-onnx',
            },
            ('mfr-pro-1.5', 'pytorch'): {
                # 'filename': 'p2t-mfr-pro-pytorch.zip', # download the file from CN OSS
                'hf_model_id': 'breezedeus/pix2text-mfr-pro-1.5-pytorch',
                'local_model_id': 'mfr-pro-1.5-pytorch',
            },
        }
    )

    # All built-in models: the free set plus the paid set.
    P2T_MODELS = deepcopy(FREE_MODELS)
    P2T_MODELS.update(PAID_MODELS)
    # Models registered at runtime by external callers; see `register_models`.
    OUTER_MODELS = {}

    def all_models(self) -> Set[Tuple[str, str]]:
        """Return every known ``(model_name, model_backend)`` pair."""
        return set(self.P2T_MODELS.keys()) | set(self.OUTER_MODELS.keys())

    def __contains__(self, model_name_backend: Tuple[str, str]) -> bool:
        """Support ``(name, backend) in AVAILABLE_MODELS`` membership tests."""
        return model_name_backend in self.all_models()

    def register_models(self, model_dict: Dict[Tuple[str, str], Any], space: str):
        """Register external models under the namespace `space`.

        Keys already present (built-in or previously registered) are skipped
        with a warning rather than overwritten. Each stored entry is a deep
        copy of the caller's value, augmented with a 'space' key. `space`
        must not start with '__' (reserved for Pix2Text's own namespace).
        """
        assert not space.startswith('__')
        for key, val in model_dict.items():
            if key in self.P2T_MODELS or key in self.OUTER_MODELS:
                logger.warning(
                    'model %s has already existed, and will be ignored' % key
                )
                continue
            val = deepcopy(val)
            val['space'] = space
            self.OUTER_MODELS[key] = val

    def get_space(self, model_name, model_backend) -> Optional[str]:
        """Return the namespace a model belongs to.

        NOTE(review): unknown models silently fall back to the Pix2Text
        namespace instead of returning ``None`` — the final return doubles
        as a default.
        """
        if (model_name, model_backend) in self.P2T_MODELS:
            return self.P2T_SPACE
        elif (model_name, model_backend) in self.OUTER_MODELS:
            return self.OUTER_MODELS[(model_name, model_backend)]['space']
        return self.P2T_SPACE

    def get_info(self, model_name, model_backend) -> Optional[dict]:
        """Return download info for a model, or ``None`` if it is unknown.

        The returned dict is a copy augmented with the 'cn_oss' endpoint
        (see `format_model_info`); the registry entries are not mutated.
        """
        if (model_name, model_backend) in self.P2T_MODELS:
            info = self.P2T_MODELS[(model_name, model_backend)]
        elif (model_name, model_backend) in self.OUTER_MODELS:
            info = self.OUTER_MODELS[(model_name, model_backend)]
        else:
            logger.warning(
                'no url is found for model %s' % ((model_name, model_backend),)
            )
            return None
        info = format_model_info(info)
        return info


# Module-level singleton used across the package.
AVAILABLE_MODELS = AvailableModels()
|
pix2text/doc_xl_layout/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
# This whole directory is adapted from https://github.com/AlibabaResearch/AdvancedLiterateMachinery.
|
| 3 |
+
# Thanks to the authors.
|
| 4 |
+
from .doc_xl_layout_parser import DocXLayoutParser
|
pix2text/doc_xl_layout/detectors/__init__.py
ADDED
|
File without changes
|
pix2text/doc_xl_layout/detectors/base_detector_subfield.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
import os
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from ..models.model import create_model, load_model
|
| 9 |
+
|
| 10 |
+
# from ..utils.debugger import Debugger
|
| 11 |
+
from ..utils.image import get_affine_transform
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BaseDetector(object):
|
| 15 |
+
    def __init__(self, opt):
        """Build the detection network from `opt`, load its pretrained
        weights, move it to ``opt.device``, and cache the preprocessing /
        decoding settings used by the detector."""
        # Device selection is left to the caller via `opt.device`.
        # if opt.gpus[0] >= 0:
        #     opt.device = torch.device('cuda')
        # else:
        #     opt.device = torch.device('cpu')

        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt.convert_onnx, {})
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()  # inference mode: no dropout / BN statistic updates

        # Per-channel normalization statistics, broadcastable over HxWx3 images.
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = opt.K  # presumably the max detections kept per image — TODO confirm
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales  # test-time scale factors
        self.opt = opt
        self.pause = True
|
| 33 |
+
|
| 34 |
+
def pre_process(self, image, scale, meta=None):
|
| 35 |
+
height, width = image.shape[0:2]
|
| 36 |
+
new_height = int(height * scale)
|
| 37 |
+
new_width = int(width * scale)
|
| 38 |
+
if self.opt.fix_res:
|
| 39 |
+
inp_height, inp_width = self.opt.input_h, self.opt.input_w
|
| 40 |
+
c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
|
| 41 |
+
s = max(height, width) * 1.0
|
| 42 |
+
else:
|
| 43 |
+
inp_height = (new_height | self.opt.pad) # + 1
|
| 44 |
+
inp_width = (new_width | self.opt.pad) # + 1
|
| 45 |
+
c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
|
| 46 |
+
s = np.array([inp_width, inp_height], dtype=np.float32)
|
| 47 |
+
|
| 48 |
+
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
|
| 49 |
+
resized_image = cv2.resize(image, (new_width, new_height))
|
| 50 |
+
inp_image = cv2.warpAffine(
|
| 51 |
+
resized_image, trans_input, (inp_width, inp_height),
|
| 52 |
+
flags=cv2.INTER_LINEAR)
|
| 53 |
+
vis_image = inp_image
|
| 54 |
+
# import pdb; pdb.set_trace()
|
| 55 |
+
inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
|
| 56 |
+
|
| 57 |
+
images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
|
| 58 |
+
if self.opt.flip_test:
|
| 59 |
+
images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
|
| 60 |
+
images = torch.from_numpy(images)
|
| 61 |
+
meta = {'c': c, 's': s,
|
| 62 |
+
'input_height': inp_height,
|
| 63 |
+
'input_width': inp_width,
|
| 64 |
+
'vis_image': vis_image,
|
| 65 |
+
'out_height': inp_height // self.opt.down_ratio,
|
| 66 |
+
'out_width': inp_width // self.opt.down_ratio}
|
| 67 |
+
return images, meta
|
| 68 |
+
|
| 69 |
+
def resize(self, image):
|
| 70 |
+
h, w, _ = image.shape
|
| 71 |
+
scale = self.opt.input_h / (max(w, h) + 1e-4)
|
| 72 |
+
image = cv2.resize(image, (int(w * scale), int(h * scale)))
|
| 73 |
+
image = cv2.copyMakeBorder(image, 0, self.opt.input_h - int(h * scale), 0, self.opt.input_h - int(w * scale),
|
| 74 |
+
cv2.BORDER_CONSTANT, value=[0, 0, 0])
|
| 75 |
+
return image, scale
|
| 76 |
+
|
| 77 |
+
def process(self, images, return_time=False):
|
| 78 |
+
raise NotImplementedError
|
| 79 |
+
|
| 80 |
+
def post_process(self, dets, meta, scale=1):
|
| 81 |
+
raise NotImplementedError
|
| 82 |
+
|
| 83 |
+
def merge_outputs(self, detections):
|
| 84 |
+
raise NotImplementedError
|
| 85 |
+
|
| 86 |
+
def debug(self, debugger, images, dets, output, scale=1):
|
| 87 |
+
raise NotImplementedError
|
| 88 |
+
|
| 89 |
+
def show_results(self, debugger, image, results):
|
| 90 |
+
raise NotImplementedError
|
| 91 |
+
|
| 92 |
+
def ps_convert_minmax(self, results):
|
| 93 |
+
detection = {}
|
| 94 |
+
for j in range(1, self.num_classes + 1):
|
| 95 |
+
detection[j] = []
|
| 96 |
+
for j in range(1, self.num_classes + 1):
|
| 97 |
+
for bbox in results[j]:
|
| 98 |
+
if bbox[8] < self.opt.scores_thresh:
|
| 99 |
+
continue
|
| 100 |
+
minx = max(min(bbox[0], bbox[2], bbox[4], bbox[6]), 0)
|
| 101 |
+
miny = max(min(bbox[1], bbox[3], bbox[5], bbox[7]), 0)
|
| 102 |
+
maxx = max(bbox[0], bbox[2], bbox[4], bbox[6])
|
| 103 |
+
maxy = max(bbox[1], bbox[3], bbox[5], bbox[7])
|
| 104 |
+
detection[j].append([minx, miny, maxx, maxy, bbox[8], bbox[-1]])
|
| 105 |
+
for j in range(1, self.num_classes + 1):
|
| 106 |
+
detection[j] = np.array(detection[j])
|
| 107 |
+
return detection
|
| 108 |
+
|
| 109 |
+
def Duplicate_removal(self, results):
|
| 110 |
+
bbox = []
|
| 111 |
+
for box in results:
|
| 112 |
+
if box[8] > self.opt.scores_thresh:
|
| 113 |
+
# for i in range(8):
|
| 114 |
+
# if box[i] < 0:
|
| 115 |
+
# box[i] = 0
|
| 116 |
+
# if box[i]>self.opt.input_h:
|
| 117 |
+
# box[i]=self.opt.input_h
|
| 118 |
+
bbox.append(box)
|
| 119 |
+
if len(bbox) > 0:
|
| 120 |
+
return np.array(bbox)
|
| 121 |
+
else:
|
| 122 |
+
return np.array([[0] * 12])
|
| 123 |
+
|
| 124 |
+
def run(self, image_or_path_or_tensor, meta=None):
|
| 125 |
+
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
|
| 126 |
+
merge_time, tot_time = 0, 0
|
| 127 |
+
# debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug == 3), num_classes=self.opt.num_classes,
|
| 128 |
+
# theme=self.opt.debugger_theme)
|
| 129 |
+
start_time = time.time()
|
| 130 |
+
pre_processed = False
|
| 131 |
+
if isinstance(image_or_path_or_tensor, np.ndarray):
|
| 132 |
+
image = image_or_path_or_tensor
|
| 133 |
+
elif type(image_or_path_or_tensor) == type(''):
|
| 134 |
+
image = cv2.imread(image_or_path_or_tensor)
|
| 135 |
+
else:
|
| 136 |
+
image = image_or_path_or_tensor['image'][0].numpy()
|
| 137 |
+
pre_processed_images = image_or_path_or_tensor
|
| 138 |
+
pre_processed = True
|
| 139 |
+
|
| 140 |
+
loaded_time = time.time()
|
| 141 |
+
load_time += (loaded_time - start_time)
|
| 142 |
+
|
| 143 |
+
detections = []
|
| 144 |
+
for scale in self.scales:
|
| 145 |
+
scale_start_time = time.time()
|
| 146 |
+
if not pre_processed:
|
| 147 |
+
images, meta = self.pre_process(image, scale, meta)
|
| 148 |
+
else:
|
| 149 |
+
images = pre_processed_images['images'][scale][0]
|
| 150 |
+
meta = pre_processed_images['meta'][scale]
|
| 151 |
+
meta = {k: v.numpy()[0] for k, v in meta.items()}
|
| 152 |
+
|
| 153 |
+
# import ipdb;ipdb.set_trace()
|
| 154 |
+
# images = np.load('data.npy').astype(np.float32)
|
| 155 |
+
# images = torch.from_numpy(images)
|
| 156 |
+
|
| 157 |
+
images = images.to(self.opt.device)
|
| 158 |
+
# torch.cuda.synchronize()
|
| 159 |
+
pre_process_time = time.time()
|
| 160 |
+
pre_time += pre_process_time - scale_start_time
|
| 161 |
+
output, dets, dets_sub, corner, forward_time = self.process(images, return_time=True)
|
| 162 |
+
# torch.cuda.synchronize()
|
| 163 |
+
net_time += forward_time - pre_process_time
|
| 164 |
+
decode_time = time.time()
|
| 165 |
+
dec_time += decode_time - forward_time
|
| 166 |
+
|
| 167 |
+
# if self.opt.debug >= 2:
|
| 168 |
+
# self.debug(debugger, images, dets, output, scale)
|
| 169 |
+
|
| 170 |
+
dets, corner = self.post_process(dets, corner, meta, scale)
|
| 171 |
+
for j in range(1, self.num_classes + 1):
|
| 172 |
+
dets[j] = self.Duplicate_removal(dets[j])
|
| 173 |
+
|
| 174 |
+
# add sub
|
| 175 |
+
dets_sub, corner = self.post_process(dets_sub, corner, meta, scale)
|
| 176 |
+
for j in range(1, self.num_classes + 1):
|
| 177 |
+
dets_sub[j] = self.Duplicate_removal(dets_sub[j])
|
| 178 |
+
|
| 179 |
+
# import ipdb;ipdb.set_trace()
|
| 180 |
+
# torch.cuda.synchronize()
|
| 181 |
+
post_process_time = time.time()
|
| 182 |
+
post_time += post_process_time - decode_time
|
| 183 |
+
|
| 184 |
+
dets[12] = dets_sub[12]
|
| 185 |
+
dets[13] = dets_sub[13]
|
| 186 |
+
|
| 187 |
+
detections.append(dets)
|
| 188 |
+
|
| 189 |
+
results = self.merge_outputs(detections)
|
| 190 |
+
# torch.cuda.synchronize()
|
| 191 |
+
end_time = time.time()
|
| 192 |
+
merge_time += end_time - post_process_time
|
| 193 |
+
tot_time += end_time - start_time
|
| 194 |
+
|
| 195 |
+
# import pdb; pdb.set_trace()
|
| 196 |
+
if self.opt.debug >= 1:
|
| 197 |
+
if isinstance(image_or_path_or_tensor, str):
|
| 198 |
+
image_name = os.path.basename(image_or_path_or_tensor)
|
| 199 |
+
else:
|
| 200 |
+
print("--> warning: use demo.py for a better visualization")
|
| 201 |
+
image_name = "{}.jpg".format(time.time())
|
| 202 |
+
# self.show_results(debugger, image, results, corner, image_name)
|
| 203 |
+
|
| 204 |
+
return {'results': results, 'tot': tot_time, 'load': load_time,
|
| 205 |
+
'pre': pre_time, 'net': net_time, 'dec': dec_time, 'corner': corner,
|
| 206 |
+
'post': post_time, 'merge': merge_time, 'output': output}
|
pix2text/doc_xl_layout/detectors/ctdet_subfield.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
import time
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
# from external.nms import soft_nms
|
| 7 |
+
from ..external.shapelyNMS import pnms
|
| 8 |
+
from ..models.decode import ctdet_4ps_decode, ctdet_cls_decode
|
| 9 |
+
from ..models.utils import flip_tensor
|
| 10 |
+
from ..utils.post_process import ctdet_4ps_post_process
|
| 11 |
+
from .base_detector_subfield import BaseDetector
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class CtdetDetector_Subfield(BaseDetector):
    """CenterNet (ctdet) detector with an extra 'subfield' branch that
    detects page columns in addition to regular layout elements.

    Each decoded detection row is laid out as: 8 polygon coordinates,
    score (index 8), class id (index 9), then auxiliary class/ftype ids.
    """

    def __init__(self, opt):
        super(CtdetDetector_Subfield, self).__init__(opt)

    def process(self, images, return_time=False):
        """Run the network and decode raw detections for both branches.

        Args:
            images: batched input tensor from ``pre_process``.
            return_time: when True, also return the forward-pass timestamp.

        Returns:
            ``(output, dets, dets_sub[, corner, forward_time])`` -- the raw
            head outputs, the decoded main-branch and sub-branch detections
            as numpy arrays, and a placeholder ``corner`` value of 0.
        """
        with torch.no_grad():
            # The model returns a list of stage outputs; use the last one.
            output = self.model(images)[-1]
            if self.opt.convert_onnx == 1:
                # One-shot ONNX export path: name all head outputs and mark
                # the spatial dims dynamic so any input size is accepted.
                inputs = ['data']
                outputs = [
                    'hm.0.sigmoid',
                    'hm.0.maxpool',
                    'cls.0.sigmoid',
                    'ftype.0.sigmoid',
                    'wh.2',
                    'reg.2',
                    'hm_sub.0.sigmoid',
                    'hm_sub.0.maxpool',
                    'wh_sub.2',
                    'reg_sub.2',
                ]
                dynamic_axes = {
                    'data': {2: 'h', 3: 'w'},
                    'hm.0.sigmoid': {2: 'H', 3: 'W'},
                    'hm.0.maxpool': {2: 'H', 3: 'W'},
                    'cls.0.sigmoid': {2: 'H', 3: 'W'},
                    'ftype.0.sigmoid': {2: 'H', 3: 'W'},
                    'wh.2': {2: 'H', 3: 'W'},
                    'reg.2': {2: 'H', 3: 'W'},
                    'hm_sub.0.sigmoid': {2: 'H', 3: 'W'},
                    'hm_sub.0.maxpool': {2: 'H', 3: 'W'},
                    'wh_sub.2': {2: 'H', 3: 'W'},
                    'reg_sub.2': {2: 'H', 3: 'W'},
                }

                onnx_path = self.opt.onnx_path
                if self.opt.onnx_path == "auto":
                    onnx_path = "{}_{}cls_{}ftype.onnx".format(
                        self.opt.dataset,
                        self.opt.num_classes,
                        self.opt.num_secondary_classes,
                    )

                torch.onnx.export(
                    self.model,
                    images,
                    onnx_path,
                    input_names=inputs,
                    output_names=outputs,
                    dynamic_axes=dynamic_axes,
                    do_constant_folding=True,
                    opset_version=10,
                )
                print("--> info: onnx is saved at: {}".format(onnx_path))
                # In export mode the sigmoids are baked into the graph, so
                # read the pre-activated outputs.
                cls = output['cls_sigmoid']
                hm = output['hm_sigmoid']
                ftype = output['ftype_sigmoid']

                # subfield branch heatmap
                hm_sub = output['hm_sigmoid_sub']
            else:
                # Normal inference: apply sigmoid in place on the raw heads.
                hm = output['hm'].sigmoid_()
                cls = output['cls'].sigmoid_()
                ftype = output['ftype'].sigmoid_()

                # subfield branch heatmap
                hm_sub = output['hm_sub'].sigmoid_()

            wh = output['wh']
            reg = output['reg'] if self.opt.reg_offset else None

            # subfield branch size / offset heads
            wh_sub = output['wh_sub']
            reg_sub = output['reg_sub'] if self.opt.reg_offset else None

            if self.opt.flip_test:
                # Average the original and mirrored views (main branch only;
                # the sub branch is not flip-averaged here).
                hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
                wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
                reg = reg[0:1] if reg is not None else None
            forward_time = time.time()
            # Decode top-K 4-point boxes; ``inds`` are the heatmap peak
            # indices reused below to gather per-box cls/ftype scores.
            dets, inds = ctdet_4ps_decode(hm, wh, reg=reg, K=self.opt.K)

            # subfield branch decode
            dets_sub, inds_sub = ctdet_4ps_decode(
                hm_sub, wh_sub, reg=reg_sub, K=self.opt.K
            )

            box_cls = ctdet_cls_decode(cls, inds)
            box_ftype = ctdet_cls_decode(ftype, inds)
            clses = torch.argmax(box_cls, dim=2, keepdim=True)
            ftypes = torch.argmax(box_ftype, dim=2, keepdim=True)
            # Append the argmax class and ftype ids as extra columns.
            dets = np.concatenate(
                (
                    dets.detach().cpu().numpy(),
                    clses.detach().cpu().numpy(),
                    ftypes.detach().cpu().numpy(),
                ),
                axis=2,
            )
            dets = np.array(dets)

            # Subfield boxes get the same cls/ftype columns (taken from the
            # main branch's peaks) ...
            dets_sub = np.concatenate(
                (
                    dets_sub.detach().cpu().numpy(),
                    clses.detach().cpu().numpy(),
                    ftypes.detach().cpu().numpy(),
                ),
                axis=2,
            )
            dets_sub = np.array(dets_sub)
            # ... then their class ids are shifted by 11 so they land in the
            # column-class range (presumably 'full column'/'sub column' --
            # TODO confirm against CATEGORIES in the parser).
            dets_sub[:, :, -3] += 11

        # Corner information is not produced by this detector; keep a
        # placeholder so callers have a uniform return shape.
        corner = 0

        if return_time:
            return output, dets, dets_sub, corner, forward_time
        else:
            return output, dets, dets_sub

    def post_process(self, dets, corner, meta, scale=1):
        """Map decoded detections back to original-image coordinates.

        Optionally applies polygon NMS first (``opt.nms``), then transforms
        coordinates using the affine params in ``meta`` and splits rows into
        a per-class dict.

        Returns:
            (per_class_dict, corner): dict keyed 1..num_classes of float32
            arrays with ``k`` columns each; ``corner`` is passed through.
        """
        if self.opt.nms:
            detn = pnms(dets[0], self.opt.scores_thresh)
            if detn.shape[0] > 0:
                dets = detn.reshape(1, -1, detn.shape[1])
        # k = row width; 0 when NMS removed everything.
        k = dets.shape[2] if dets.shape[1] != 0 else 0
        if dets.shape[1] != 0:
            dets = dets.reshape(1, -1, dets.shape[2])
            # return dets is list and what in dets is dict. key of dict is
            # classes, value of dict is [bbox, score]
            dets = ctdet_4ps_post_process(
                dets.copy(),
                [meta['c']],
                [meta['s']],
                meta['out_height'],
                meta['out_width'],
                self.opt.num_classes,
            )
            for j in range(1, self.num_classes + 1):
                dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, k)
                # Undo the test-scale factor on the 8 polygon coordinates.
                dets[0][j][:, :8] /= scale
        else:
            # No surviving detections: emit one all-zero row per class so
            # downstream indexing still works.
            ret = {}
            dets = []
            for j in range(1, self.num_classes + 1):
                ret[j] = np.array([0] * k, dtype=np.float32)  # .reshape(-1, k)
            dets.append(ret)
        return dets[0], corner

    def merge_outputs(self, detections):
        """Merge per-scale per-class detections and cap the total number of
        boxes at ``self.max_per_image`` by score thresholding.

        Args:
            detections: list of per-class dicts (one per test scale).

        Returns:
            dict keyed 1..num_classes of float32 detection arrays.
        """
        results = {}
        for j in range(1, self.num_classes + 1):
            results[j] = np.concatenate(
                [detection[j] for detection in detections], axis=0
            ).astype(np.float32)
        shape_num = 0
        for j in range(1, self.num_classes + 1):
            shape_num = shape_num + len(results[j])
        if shape_num != 0:
            # Collect all scores (column 8) across classes.
            scores = np.hstack(
                [results[j][:, 8] for j in range(1, self.num_classes + 1)]
            )
        else:
            scores = []
        if len(scores) > self.max_per_image:
            # Keep only the top max_per_image boxes overall: find the score
            # threshold via a partial sort and filter every class with it.
            kth = len(scores) - self.max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, self.num_classes + 1):
                keep_inds = results[j][:, 8] >= thresh
                results[j] = results[j][keep_inds]
        return results

    def debug(self, debugger, images, dets, output, scale=1):
        """Overlay the predicted heatmap and above-threshold boxes on the
        (de-normalized) network input for visual debugging."""
        detection = dets.copy()
        # Boxes are in output-map coordinates; scale up to input size.
        detection[:, :, :8] *= self.opt.down_ratio
        for i in range(1):
            # Undo normalization to recover a displayable uint8 image.
            img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
            img = ((img * self.std + self.mean) * 255).astype(np.uint8)
            pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
            debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
            debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
            for k in range(len(dets[i])):
                if detection[i, k, 8] > self.opt.center_thresh:
                    debugger.add_4ps_coco_bbox(
                        detection[i, k, :8],
                        detection[i, k, -1],
                        detection[i, k, 8],
                        img_id='out_pred_{:.1f}'.format(scale),
                    )

    def show_results(self, debugger, image, results, Corners, image_name):
        """Draw all above-threshold detections on ``image`` and save the
        rendering under ``./outputs/``."""
        debugger.add_img(image, img_id='ctdet')
        count = 0
        for j in range(1, self.num_classes + 1):
            for bbox in results[j]:
                if bbox[8] > self.opt.scores_thresh:
                    count += 1
                    debugger.add_4ps_coco_bbox(
                        bbox, j - 1, bbox[8], show_txt=True, img_id='ctdet'
                    )
        debugger.save_all_imgs(image_name, './outputs/')
|
pix2text/doc_xl_layout/detectors/detector_factory.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
|
| 3 |
+
from .ctdet_subfield import CtdetDetector_Subfield
|
| 4 |
+
|
| 5 |
+
# Registry mapping ``opt.task`` names to detector classes; DocXLayoutParser
# looks up ``detector_factory[opt.task]`` to instantiate the configured
# detector. Add new task types here.
detector_factory = {
    'ctdet_subfield': CtdetDetector_Subfield
}
|
pix2text/doc_xl_layout/doc_xl_layout_parser.py
ADDED
|
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding: utf-8
|
| 2 |
+
# Adapted from https://github.com/AlibabaResearch/AdvancedLiterateMachinery
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import shutil
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
from copy import deepcopy, copy
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
import logging
|
| 10 |
+
from typing import Union, List, Dict, Any, Optional
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from PIL import Image
|
| 14 |
+
|
| 15 |
+
from .opts import opts
|
| 16 |
+
from .huntie_subfield import Huntie_Subfield
|
| 17 |
+
from .detectors.detector_factory import detector_factory
|
| 18 |
+
from .wrapper import wrap_result
|
| 19 |
+
from ..consts import MODEL_VERSION
|
| 20 |
+
from ..layout_parser import LayoutParser, ElementType
|
| 21 |
+
from ..utils import (
|
| 22 |
+
select_device,
|
| 23 |
+
read_img,
|
| 24 |
+
data_dir,
|
| 25 |
+
save_layout_img,
|
| 26 |
+
clipbox,
|
| 27 |
+
overlap,
|
| 28 |
+
box2list,
|
| 29 |
+
x_overlap,
|
| 30 |
+
merge_boxes,
|
| 31 |
+
prepare_model_files2,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
# Layout element categories recognized by DocXLayout, keyed by name with
# their contiguous class indices (0..12) as values.
CATEGORIES = {
    "title": 0,
    "figure": 1,
    "plain text": 2,
    "header": 3,
    "page number": 4,
    "footnote": 5,
    "footer": 6,
    "table": 7,
    "table caption": 8,
    "figure caption": 9,
    "equation": 10,
    "full column": 11,
    "sub column": 12,
}
# Inverse lookup table: CATEGORY_MAPPING[idx] -> category name.  Built by
# ordering the category names by their class index.
CATEGORY_MAPPING = sorted(CATEGORIES, key=CATEGORIES.get)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class DocXLayoutOutput:
    """Container for the raw results of one DocXLayout detection run.

    Keeps the regular layout-element detections and the column ("subfield")
    detections separate, along with a status message ('success' on a normal
    run, otherwise the repr of the exception that aborted detection).
    """

    def __init__(self, layout_detection_info, subfield_detection_info, message=''):
        # Detections for regular layout elements (title, text, table, ...).
        self.layout_detection_info = layout_detection_info
        # Detections for column subfields (full column / sub column).
        self.subfield_detection_info = subfield_detection_info
        # Human-readable status of the detection run.
        self.message = message

    def to_json(self):
        """Wrap both detection lists into the structured result dict."""
        detection_pair = (self.layout_detection_info, self.subfield_detection_info)
        return wrap_result(*detection_pair, CATEGORY_MAPPING)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class DocXLayoutParser(LayoutParser):
|
| 69 |
+
ignored_types = {'footnote', 'footer', 'page number'}
|
| 70 |
+
type_mappings = {
|
| 71 |
+
'title': ElementType.TITLE,
|
| 72 |
+
'figure': ElementType.FIGURE,
|
| 73 |
+
'plain text': ElementType.TEXT,
|
| 74 |
+
'header': ElementType.TEXT,
|
| 75 |
+
'table': ElementType.TABLE,
|
| 76 |
+
'table caption': ElementType.TEXT,
|
| 77 |
+
'figure caption': ElementType.TEXT,
|
| 78 |
+
'equation': ElementType.FORMULA,
|
| 79 |
+
}
|
| 80 |
+
# types that are isolated and usually don't cross different columns. They should not be merged with other elements
|
| 81 |
+
is_isolated = {'header', 'table caption', 'figure caption', 'equation'}
|
| 82 |
+
|
| 83 |
+
def __init__(
|
| 84 |
+
self,
|
| 85 |
+
device: str = None,
|
| 86 |
+
model_fp: Optional[str] = None,
|
| 87 |
+
root: Union[str, Path] = data_dir(),
|
| 88 |
+
**kwargs,
|
| 89 |
+
):
|
| 90 |
+
if model_fp is None:
|
| 91 |
+
model_fp = self._prepare_model_files(root, None)
|
| 92 |
+
new_params = {
|
| 93 |
+
'task': 'ctdet_subfield',
|
| 94 |
+
'arch': 'dlav0subfield_34',
|
| 95 |
+
'input_res': 768,
|
| 96 |
+
'num_classes': 13,
|
| 97 |
+
'scores_thresh': kwargs.get('scores_thresh', 0.35),
|
| 98 |
+
'load_model': str(model_fp),
|
| 99 |
+
'debug': kwargs.get('debug', 0),
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
opt = opts().parse(new_params)
|
| 103 |
+
opt = opts().update_dataset_info_and_set_heads(opt, Huntie_Subfield)
|
| 104 |
+
opt.device = select_device(device)
|
| 105 |
+
|
| 106 |
+
Detector = detector_factory[opt.task]
|
| 107 |
+
detector = Detector(opt)
|
| 108 |
+
self.detector = detector
|
| 109 |
+
self.opt = opt
|
| 110 |
+
logger.debug("DocXLayoutParser parameters %s", self.opt)
|
| 111 |
+
|
| 112 |
+
@classmethod
|
| 113 |
+
def from_config(cls, configs: Optional[dict] = None, device: str = None, **kwargs):
|
| 114 |
+
configs = copy(configs or {})
|
| 115 |
+
device = select_device(device)
|
| 116 |
+
model_fp = configs.pop('model_fp', None)
|
| 117 |
+
root = configs.pop('root', data_dir())
|
| 118 |
+
configs.pop('device', None)
|
| 119 |
+
|
| 120 |
+
return cls(device=device, model_fp=model_fp, root=root, **configs)
|
| 121 |
+
|
| 122 |
+
def _prepare_model_files(self, root, model_info):
|
| 123 |
+
model_root_dir = Path(root).expanduser() / MODEL_VERSION
|
| 124 |
+
model_dir = model_root_dir / 'layout-parser'
|
| 125 |
+
model_fp = model_dir / 'DocXLayout_231012.pth'
|
| 126 |
+
if model_fp.exists():
|
| 127 |
+
return model_fp
|
| 128 |
+
model_fp = prepare_model_files2(
|
| 129 |
+
model_fp_or_dir=model_fp,
|
| 130 |
+
remote_repo="breezedeus/pix2text-layout",
|
| 131 |
+
file_or_dir="file",
|
| 132 |
+
)
|
| 133 |
+
return model_fp
|
| 134 |
+
|
| 135 |
+
def convert_eval_format(self, all_bboxes, opt):
|
| 136 |
+
layout_detection_items = []
|
| 137 |
+
subfield_detection_items = []
|
| 138 |
+
for cls_ind in all_bboxes:
|
| 139 |
+
for box in all_bboxes[cls_ind]:
|
| 140 |
+
if box[8] < opt.scores_thresh:
|
| 141 |
+
continue
|
| 142 |
+
pts = np.round(box).tolist()[:8]
|
| 143 |
+
score = box[8]
|
| 144 |
+
category_id = box[9]
|
| 145 |
+
# direction_id = box[10]
|
| 146 |
+
# secondary_id = box[11]
|
| 147 |
+
detection = {
|
| 148 |
+
"category_id": int(category_id),
|
| 149 |
+
# "secondary_id": int(secondary_id),
|
| 150 |
+
# "direction_id": int(direction_id),
|
| 151 |
+
"poly": pts,
|
| 152 |
+
"score": float("{:.2f}".format(score)),
|
| 153 |
+
}
|
| 154 |
+
if cls_ind in (12, 13):
|
| 155 |
+
subfield_detection_items.append(detection)
|
| 156 |
+
else:
|
| 157 |
+
layout_detection_items.append(detection)
|
| 158 |
+
return layout_detection_items, subfield_detection_items
|
| 159 |
+
|
| 160 |
+
def parse(
|
| 161 |
+
self,
|
| 162 |
+
img: Union[str, Path, Image.Image],
|
| 163 |
+
table_as_image: bool = False,
|
| 164 |
+
**kwargs,
|
| 165 |
+
) -> (List[Dict[str, Any]], Dict[str, Any]):
|
| 166 |
+
"""
|
| 167 |
+
|
| 168 |
+
Args:
|
| 169 |
+
img ():
|
| 170 |
+
table_as_image ():
|
| 171 |
+
**kwargs ():
|
| 172 |
+
* save_debug_res (str): if `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save
|
| 173 |
+
* expansion_margin (int): expansion margin
|
| 174 |
+
|
| 175 |
+
Returns:
|
| 176 |
+
|
| 177 |
+
"""
|
| 178 |
+
if isinstance(img, Image.Image):
|
| 179 |
+
img0 = img.convert('RGB')
|
| 180 |
+
else:
|
| 181 |
+
img0 = read_img(img, return_type='Image')
|
| 182 |
+
img_width, img_height = img0.size
|
| 183 |
+
try:
|
| 184 |
+
# to np.array, RGB -> BGR
|
| 185 |
+
ret = self.detector.run(np.array(img0)[:, :, ::-1])
|
| 186 |
+
layout_detection_info, subfield_detection_info = self.convert_eval_format(
|
| 187 |
+
ret['results'], self.opt
|
| 188 |
+
)
|
| 189 |
+
out = DocXLayoutOutput(
|
| 190 |
+
layout_detection_info, subfield_detection_info, message='success'
|
| 191 |
+
)
|
| 192 |
+
except Exception as e:
|
| 193 |
+
logger.warning("DocXLayoutPredictor Error %s", repr(e))
|
| 194 |
+
out = DocXLayoutOutput([], [], message=repr(e))
|
| 195 |
+
|
| 196 |
+
layout_out = out.to_json()
|
| 197 |
+
debug_dir = None
|
| 198 |
+
if kwargs.get('save_debug_res', None):
|
| 199 |
+
debug_dir = Path(kwargs.get('save_debug_res'))
|
| 200 |
+
debug_dir.mkdir(exist_ok=True, parents=True)
|
| 201 |
+
if debug_dir is not None:
|
| 202 |
+
with open(debug_dir / 'layout_out.json', 'w', encoding='utf-8') as f:
|
| 203 |
+
json.dump(
|
| 204 |
+
layout_out, f, indent=2, ensure_ascii=False,
|
| 205 |
+
)
|
| 206 |
+
if layout_out:
|
| 207 |
+
layout_out = self._preprocess_outputs(img0, layout_out)
|
| 208 |
+
layout_out, column_meta = self._format_outputs(
|
| 209 |
+
img0, layout_out, table_as_image
|
| 210 |
+
)
|
| 211 |
+
else:
|
| 212 |
+
layout_out, column_meta = [], {}
|
| 213 |
+
|
| 214 |
+
layout_out = self._merge_overlapped_boxes(layout_out)
|
| 215 |
+
|
| 216 |
+
expansion_margin = kwargs.get('expansion_margin', 8)
|
| 217 |
+
layout_out = self._expand_boxes(
|
| 218 |
+
layout_out, expansion_margin, height=img_height, width=img_width
|
| 219 |
+
)
|
| 220 |
+
|
| 221 |
+
save_layout_fp = kwargs.get(
|
| 222 |
+
'save_layout_res',
|
| 223 |
+
debug_dir / 'layout_res.jpg' if debug_dir is not None else None,
|
| 224 |
+
)
|
| 225 |
+
if save_layout_fp:
|
| 226 |
+
element_type_list = [t for t in ElementType]
|
| 227 |
+
save_layout_img(
|
| 228 |
+
img0,
|
| 229 |
+
element_type_list,
|
| 230 |
+
layout_out,
|
| 231 |
+
save_path=save_layout_fp,
|
| 232 |
+
key='position',
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
return layout_out, column_meta
|
| 236 |
+
|
| 237 |
+
def _preprocess_outputs(self, img0, outs):
|
| 238 |
+
width, height = img0.size
|
| 239 |
+
|
| 240 |
+
subfields = outs['subfields']
|
| 241 |
+
for column_info in subfields:
|
| 242 |
+
layout_out = column_info['layouts']
|
| 243 |
+
if len(layout_out) < 2:
|
| 244 |
+
continue
|
| 245 |
+
for idx, cur_box_info in enumerate(layout_out[:-1]):
|
| 246 |
+
next_box_info = layout_out[idx + 1]
|
| 247 |
+
cur_box_ymax = cur_box_info['pts'][-1]
|
| 248 |
+
next_box_ymin = next_box_info['pts'][1]
|
| 249 |
+
if (
|
| 250 |
+
cur_box_info['category'] == 'figure'
|
| 251 |
+
and next_box_info['category'] == 'figure caption'
|
| 252 |
+
and -6 < next_box_ymin - cur_box_ymax < 80
|
| 253 |
+
):
|
| 254 |
+
new_xmin = min(cur_box_info['pts'][0], next_box_info['pts'][0])
|
| 255 |
+
# new_xmin = max(new_xmin, 0, col_pts[0])
|
| 256 |
+
new_xmax = max(cur_box_info['pts'][2], next_box_info['pts'][2])
|
| 257 |
+
# new_xmax = min(new_xmax, )
|
| 258 |
+
new_ymin = max(0, cur_box_info['pts'][1])
|
| 259 |
+
new_ymax = max(cur_box_ymax, next_box_ymin - 16)
|
| 260 |
+
new_box = [
|
| 261 |
+
new_xmin,
|
| 262 |
+
new_ymin,
|
| 263 |
+
new_xmax,
|
| 264 |
+
new_ymin,
|
| 265 |
+
new_xmax,
|
| 266 |
+
new_ymax,
|
| 267 |
+
new_xmin,
|
| 268 |
+
new_ymax,
|
| 269 |
+
]
|
| 270 |
+
layout_out[idx]['pts'] = new_box
|
| 271 |
+
# FIXME: first figure caption, then figure
|
| 272 |
+
|
| 273 |
+
return outs
|
| 274 |
+
|
| 275 |
+
def _format_outputs(self, img0, out, table_as_image: bool):
|
| 276 |
+
width, height = img0.size
|
| 277 |
+
|
| 278 |
+
column_meta = defaultdict(dict)
|
| 279 |
+
final_out = []
|
| 280 |
+
subfields = out['subfields']
|
| 281 |
+
col_number = 0
|
| 282 |
+
for column_info in subfields:
|
| 283 |
+
if column_info['category'] == 'sub column':
|
| 284 |
+
cur_col_number = col_number
|
| 285 |
+
col_number += 1
|
| 286 |
+
elif column_info['category'] == 'full column': # == 'full column'
|
| 287 |
+
cur_col_number = -1
|
| 288 |
+
else: # '其他'
|
| 289 |
+
cur_col_number = -2
|
| 290 |
+
box = clipbox(np.array(column_info['pts']).reshape(4, 2), height, width)
|
| 291 |
+
column_meta[cur_col_number]['position'] = box
|
| 292 |
+
column_meta[cur_col_number]['score'] = column_info['confidence']
|
| 293 |
+
layout_out = column_info['layouts']
|
| 294 |
+
for box_info in layout_out:
|
| 295 |
+
image_type = box_info['category']
|
| 296 |
+
isolated = image_type in self.is_isolated
|
| 297 |
+
if image_type in self.ignored_types:
|
| 298 |
+
image_type = ElementType.IGNORED
|
| 299 |
+
else:
|
| 300 |
+
image_type = self.type_mappings.get(image_type, ElementType.UNKNOWN)
|
| 301 |
+
if table_as_image and image_type == ElementType.TABLE:
|
| 302 |
+
image_type = ElementType.FIGURE
|
| 303 |
+
box = clipbox(np.array(box_info['pts']).reshape(4, 2), height, width)
|
| 304 |
+
final_out.append(
|
| 305 |
+
{
|
| 306 |
+
'type': image_type,
|
| 307 |
+
'position': box,
|
| 308 |
+
'score': box_info['confidence'],
|
| 309 |
+
'col_number': cur_col_number,
|
| 310 |
+
'isolated': isolated,
|
| 311 |
+
}
|
| 312 |
+
)
|
| 313 |
+
|
| 314 |
+
if -2 in column_meta and -1 in column_meta:
|
| 315 |
+
filtered_out = []
|
| 316 |
+
full_column_box = column_meta[-1]['position']
|
| 317 |
+
full_column_xmin, full_column_xmax = (
|
| 318 |
+
full_column_box[0, 0],
|
| 319 |
+
full_column_box[1, 0],
|
| 320 |
+
)
|
| 321 |
+
for box_info in final_out:
|
| 322 |
+
if box_info['col_number'] != -2:
|
| 323 |
+
filtered_out.append(box_info)
|
| 324 |
+
continue
|
| 325 |
+
cur_box = box_info['position']
|
| 326 |
+
cur_box_xmin, cur_box_xmax = cur_box[0, 0], cur_box[1, 0]
|
| 327 |
+
cur_box_ymin, cur_box_ymax = cur_box[0, 1], cur_box[2, 1]
|
| 328 |
+
if (
|
| 329 |
+
box_info['type'] == ElementType.TEXT
|
| 330 |
+
and (
|
| 331 |
+
cur_box_xmax < full_column_xmin
|
| 332 |
+
or cur_box_xmin > full_column_xmax
|
| 333 |
+
)
|
| 334 |
+
and cur_box_ymax - cur_box_ymin > 5 * (cur_box_xmax - cur_box_xmin)
|
| 335 |
+
): # unnecessary block
|
| 336 |
+
box_info['type'] = ElementType.IGNORED
|
| 337 |
+
filtered_out.append(box_info)
|
| 338 |
+
|
| 339 |
+
final_out = filtered_out
|
| 340 |
+
|
| 341 |
+
# handle abnormal elements (col_number == -2)
|
| 342 |
+
if -2 in column_meta:
|
| 343 |
+
column_meta.pop(-2)
|
| 344 |
+
# guess which column the box belongs to
|
| 345 |
+
for _box_info in final_out:
|
| 346 |
+
if _box_info['col_number'] != -2:
|
| 347 |
+
continue
|
| 348 |
+
overlap_vals = []
|
| 349 |
+
for col_number, col_info in column_meta.items():
|
| 350 |
+
overlap_val = x_overlap(_box_info, col_info, key='position')
|
| 351 |
+
overlap_vals.append([col_number, overlap_val])
|
| 352 |
+
if overlap_vals:
|
| 353 |
+
overlap_vals.sort(key=lambda x: (x[1], x[0]), reverse=True)
|
| 354 |
+
match_col_number = overlap_vals[0][0]
|
| 355 |
+
_box_info['col_number'] = match_col_number
|
| 356 |
+
else:
|
| 357 |
+
_box_info['col_number'] = 0
|
| 358 |
+
|
| 359 |
+
return final_out, column_meta
|
| 360 |
+
|
| 361 |
+
@classmethod
|
| 362 |
+
def _merge_overlapped_boxes(cls, layout_out):
|
| 363 |
+
"""
|
| 364 |
+
Detected bounding boxes may overlap; merge these overlapping boxes into a single one.
|
| 365 |
+
"""
|
| 366 |
+
if len(layout_out) < 2:
|
| 367 |
+
return layout_out
|
| 368 |
+
layout_out = deepcopy(layout_out)
|
| 369 |
+
|
| 370 |
+
def _overlay_vertically(box1, box2):
|
| 371 |
+
if x_overlap(box1, box2, key=None) < 0.8:
|
| 372 |
+
return False
|
| 373 |
+
box1 = box2list(box1)
|
| 374 |
+
box2 = box2list(box2)
|
| 375 |
+
# 判断是否有交集
|
| 376 |
+
if box1[3] <= box2[1] or box2[3] <= box1[1]:
|
| 377 |
+
return False
|
| 378 |
+
# 计算交集的高度
|
| 379 |
+
y_min = max(box1[1], box2[1])
|
| 380 |
+
y_max = min(box1[3], box2[3])
|
| 381 |
+
return y_max - y_min > 10
|
| 382 |
+
|
| 383 |
+
for anchor_idx, anchor_box_info in enumerate(layout_out):
|
| 384 |
+
if anchor_box_info['type'] != ElementType.TEXT or anchor_box_info.get(
|
| 385 |
+
'used', False
|
| 386 |
+
):
|
| 387 |
+
continue
|
| 388 |
+
for cand_idx, cand_box_info in enumerate(layout_out):
|
| 389 |
+
if anchor_idx == cand_idx:
|
| 390 |
+
continue
|
| 391 |
+
if cand_box_info['type'] != ElementType.TEXT or cand_box_info.get(
|
| 392 |
+
'used', False
|
| 393 |
+
):
|
| 394 |
+
continue
|
| 395 |
+
if not _overlay_vertically(
|
| 396 |
+
anchor_box_info['position'], cand_box_info['position']
|
| 397 |
+
):
|
| 398 |
+
continue
|
| 399 |
+
anchor_box_info['position'] = merge_boxes(
|
| 400 |
+
anchor_box_info['position'], cand_box_info['position']
|
| 401 |
+
)
|
| 402 |
+
cand_box_info['used'] = True
|
| 403 |
+
|
| 404 |
+
return [box_info for box_info in layout_out if not box_info.get('used', False)]
|
| 405 |
+
|
| 406 |
+
@classmethod
|
| 407 |
+
def _expand_boxes(cls, layout_out, expansion_margin, height, width):
|
| 408 |
+
"""
|
| 409 |
+
Expand boxes with some margin to get better results
|
| 410 |
+
Args:
|
| 411 |
+
layout_out (): layout_out
|
| 412 |
+
expansion_margin (int): expansion margin
|
| 413 |
+
height (int): height of the image
|
| 414 |
+
width (int): width of the image
|
| 415 |
+
|
| 416 |
+
Returns: layout_out with expanded boxes
|
| 417 |
+
|
| 418 |
+
"""
|
| 419 |
+
|
| 420 |
+
def _overlap_with_some_box(idx, anchor_box):
|
| 421 |
+
# anchor_box = layout_out[idx]
|
| 422 |
+
return any(
|
| 423 |
+
[
|
| 424 |
+
overlap(anchor_box, box_info['position'], key=None) > 0
|
| 425 |
+
for idx2, box_info in enumerate(layout_out)
|
| 426 |
+
if idx2 != idx
|
| 427 |
+
]
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
for idx, box_info in enumerate(layout_out):
|
| 431 |
+
if box_info['type'] not in (
|
| 432 |
+
ElementType.TEXT,
|
| 433 |
+
ElementType.TITLE,
|
| 434 |
+
ElementType.FORMULA,
|
| 435 |
+
):
|
| 436 |
+
continue
|
| 437 |
+
if _overlap_with_some_box(idx, box_info['position']):
|
| 438 |
+
continue
|
| 439 |
+
|
| 440 |
+
# expand xmin and xmax
|
| 441 |
+
new_box = box_info['position'].copy()
|
| 442 |
+
xmin, xmax = new_box[0, 0], new_box[1, 0]
|
| 443 |
+
xmin -= expansion_margin
|
| 444 |
+
xmax += expansion_margin
|
| 445 |
+
if xmin <= 8:
|
| 446 |
+
xmin = 0
|
| 447 |
+
if xmax + 8 >= width:
|
| 448 |
+
xmax = width
|
| 449 |
+
new_box[0, 0] = new_box[3, 0] = xmin
|
| 450 |
+
new_box = clipbox(new_box, height, width)
|
| 451 |
+
if not _overlap_with_some_box(idx, new_box):
|
| 452 |
+
layout_out[idx]['position'] = new_box
|
| 453 |
+
new_box = layout_out[idx]['position'].copy()
|
| 454 |
+
new_box[1, 0] = new_box[2, 0] = xmax
|
| 455 |
+
new_box = clipbox(new_box, height, width)
|
| 456 |
+
if not _overlap_with_some_box(idx, new_box):
|
| 457 |
+
layout_out[idx]['position'] = new_box
|
| 458 |
+
|
| 459 |
+
# expand ymin and ymax
|
| 460 |
+
new_box = layout_out[idx]['position'].copy()
|
| 461 |
+
ymin, ymax = new_box[0, 1], new_box[2, 1]
|
| 462 |
+
ymin -= expansion_margin
|
| 463 |
+
ymax += expansion_margin
|
| 464 |
+
if ymin <= 8:
|
| 465 |
+
ymin = 0
|
| 466 |
+
if ymax + 8 >= height:
|
| 467 |
+
ymax = height
|
| 468 |
+
new_box[0, 1] = new_box[1, 1] = ymin
|
| 469 |
+
new_box = clipbox(new_box, height, width)
|
| 470 |
+
if not _overlap_with_some_box(idx, new_box):
|
| 471 |
+
layout_out[idx]['position'] = new_box
|
| 472 |
+
new_box = layout_out[idx]['position'].copy()
|
| 473 |
+
new_box[2, 1] = new_box[3, 1] = ymax
|
| 474 |
+
new_box = clipbox(new_box, height, width)
|
| 475 |
+
if not _overlap_with_some_box(idx, new_box):
|
| 476 |
+
layout_out[idx]['position'] = new_box
|
| 477 |
+
|
| 478 |
+
return layout_out
|
pix2text/doc_xl_layout/external/__init__.py
ADDED
|
File without changes
|
pix2text/doc_xl_layout/external/shapelyNMS.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def pnms(dets, thresh):
|
| 5 |
+
if len(dets) < 2:
|
| 6 |
+
return dets
|
| 7 |
+
scores = dets[:, 8]
|
| 8 |
+
index_keep = []
|
| 9 |
+
keep = []
|
| 10 |
+
for i in range(len(dets)):
|
| 11 |
+
box = dets[i]
|
| 12 |
+
if box[8] < thresh:
|
| 13 |
+
continue
|
| 14 |
+
max_score_index = -1
|
| 15 |
+
ctx = (dets[i][0] + dets[i][2] + dets[i][4] + dets[i][6]) / 4
|
| 16 |
+
cty = (dets[i][1] + dets[i][3] + dets[i][5] + dets[i][7]) / 4
|
| 17 |
+
for j in range(len(dets)):
|
| 18 |
+
if i == j or dets[j][8] < thresh:
|
| 19 |
+
continue
|
| 20 |
+
x1, y1 = dets[j][0], dets[j][1]
|
| 21 |
+
x2, y2 = dets[j][2], dets[j][3]
|
| 22 |
+
x3, y3 = dets[j][4], dets[j][5]
|
| 23 |
+
x4, y4 = dets[j][6], dets[j][7]
|
| 24 |
+
a = (x2 - x1) * (cty - y1) - (y2 - y1) * (ctx - x1)
|
| 25 |
+
b = (x3 - x2) * (cty - y2) - (y3 - y2) * (ctx - x2)
|
| 26 |
+
c = (x4 - x3) * (cty - y3) - (y4 - y3) * (ctx - x3)
|
| 27 |
+
d = (x1 - x4) * (cty - y4) - (y1 - y4) * (ctx - x4)
|
| 28 |
+
if ((a > 0 and b > 0 and c > 0 and d > 0) or (a < 0 and b < 0 and c < 0 and d < 0)):
|
| 29 |
+
if dets[i][8] > dets[j][8] and max_score_index < 0:
|
| 30 |
+
max_score_index = i
|
| 31 |
+
elif dets[i][8] < dets[j][8]:
|
| 32 |
+
max_score_index = -2
|
| 33 |
+
break
|
| 34 |
+
if max_score_index > -1:
|
| 35 |
+
index_keep.append(max_score_index)
|
| 36 |
+
elif max_score_index == -1:
|
| 37 |
+
index_keep.append(i)
|
| 38 |
+
for i in range(0, len(index_keep)):
|
| 39 |
+
keep.append(dets[index_keep[i]])
|
| 40 |
+
|
| 41 |
+
return np.array(keep)
|
| 42 |
+
|
| 43 |
+
'''
|
| 44 |
+
pts = []
|
| 45 |
+
for i in range(dets.shape[0]):
|
| 46 |
+
pts.append([dets[i][0:2],dets[i][2:4],dets[i][4:6],dets[i][6:8]])
|
| 47 |
+
|
| 48 |
+
areas = np.zeros(scores.shape)
|
| 49 |
+
order = scores.argsort()[::-1]
|
| 50 |
+
inter_areas = np.zeros((scores.shape[0],scores.shape[0]))
|
| 51 |
+
|
| 52 |
+
for i in range(0,len(pts)):
|
| 53 |
+
poly = Polygon(pts[i])
|
| 54 |
+
areas[i] = poly.area
|
| 55 |
+
|
| 56 |
+
for j in range(i, len(pts)):
|
| 57 |
+
polyj = Polygon(pts[j])
|
| 58 |
+
try:
|
| 59 |
+
inS = poly.intersection(polyj)
|
| 60 |
+
except Exception as e:
|
| 61 |
+
print(pts[i],'\n',pts[j])
|
| 62 |
+
return dets
|
| 63 |
+
inter_areas[i][j] = inS.area
|
| 64 |
+
inter_areas[j][i] = inS.area
|
| 65 |
+
|
| 66 |
+
keep = []
|
| 67 |
+
while order.size > 0:
|
| 68 |
+
i = order[0]
|
| 69 |
+
keep.append(dets[i])
|
| 70 |
+
ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]])
|
| 71 |
+
inds = np.where(ovr <= thresh)[0]
|
| 72 |
+
order = order[inds + 1]
|
| 73 |
+
|
| 74 |
+
return keep
|
| 75 |
+
'''
|
pix2text/doc_xl_layout/huntie_subfield.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch.utils.data as data
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Huntie_Subfield(data.Dataset):
|
| 6 |
+
num_classes = 13
|
| 7 |
+
num_secondary_classes = 3
|
| 8 |
+
default_resolution = [768, 768]
|
| 9 |
+
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32).reshape(1, 1, 3)
|
| 10 |
+
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32).reshape(1, 1, 3)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
pix2text/doc_xl_layout/opts.py
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import print_function
|
| 4 |
+
|
| 5 |
+
import argparse
|
| 6 |
+
import os
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class opts(object):
|
| 10 |
+
def __init__(self):
|
| 11 |
+
self.parser = argparse.ArgumentParser()
|
| 12 |
+
# basic experiment setting
|
| 13 |
+
self.parser.add_argument('task', default='ctdet',
|
| 14 |
+
help='ctdet | ddd | multi_pose | exdet | ctdet_subfield')
|
| 15 |
+
self.parser.add_argument('--dataset', default='huntie',
|
| 16 |
+
help='coco | kitti | coco_hp | pascal | huntie | structure')
|
| 17 |
+
self.parser.add_argument('--test', action='store_true')
|
| 18 |
+
self.parser.add_argument('--data_src', default="default", type=str,
|
| 19 |
+
help='The path of input data.')
|
| 20 |
+
self.parser.add_argument('--exp_id', default='default', type=str)
|
| 21 |
+
self.parser.add_argument('--vis_corner', type=int, default=0,
|
| 22 |
+
help='vis corner or not'
|
| 23 |
+
'0: do not vis corner'
|
| 24 |
+
'1: vis corner')
|
| 25 |
+
self.parser.add_argument('--convert_onnx', type=int, default=0,
|
| 26 |
+
help='0: donot convert'
|
| 27 |
+
'1: convert pytorch model to onnx')
|
| 28 |
+
self.parser.add_argument('--onnx_path', type=str, default="auto",
|
| 29 |
+
help='path of output onnx file.')
|
| 30 |
+
self.parser.add_argument('--debug', type=int, default=0,
|
| 31 |
+
help='level of visualization.'
|
| 32 |
+
'1: only show the final detection results'
|
| 33 |
+
'2: show the network output features'
|
| 34 |
+
'3: use matplot to display' # useful when lunching training with ipython notebook
|
| 35 |
+
'4: save all visualizations to disk')
|
| 36 |
+
self.parser.add_argument('--load_model', default='',
|
| 37 |
+
help='path to pretrained model')
|
| 38 |
+
self.parser.add_argument('--resume', action='store_true',
|
| 39 |
+
help='resume an experiment. '
|
| 40 |
+
'Reloaded the optimizer parameter and '
|
| 41 |
+
'set load_model to model_last.pth '
|
| 42 |
+
'in the exp dir if load_model is empty.')
|
| 43 |
+
|
| 44 |
+
# system
|
| 45 |
+
self.parser.add_argument('--gpus', default='-1',
|
| 46 |
+
help='-1 for CPU, use comma for multiple gpus')
|
| 47 |
+
self.parser.add_argument('--num_workers', type=int, default=16,
|
| 48 |
+
help='dataloader threads. 0 for single-thread.')
|
| 49 |
+
self.parser.add_argument('--not_cuda_benchmark', action='store_true',
|
| 50 |
+
help='disable when the input size is not fixed.')
|
| 51 |
+
self.parser.add_argument('--seed', type=int, default=317,
|
| 52 |
+
help='random seed') # from CornerNet
|
| 53 |
+
|
| 54 |
+
# log
|
| 55 |
+
self.parser.add_argument('--print_iter', type=int, default=0,
|
| 56 |
+
help='disable progress bar and print to screen.')
|
| 57 |
+
self.parser.add_argument('--hide_data_time', action='store_true',
|
| 58 |
+
help='not display time during training.')
|
| 59 |
+
self.parser.add_argument('--save_all', action='store_true',
|
| 60 |
+
help='save model to disk every 5 epochs.')
|
| 61 |
+
self.parser.add_argument('--metric', default='loss',
|
| 62 |
+
help='main metric to save best model')
|
| 63 |
+
self.parser.add_argument('--vis_thresh', type=float, default=0.3,
|
| 64 |
+
help='visualization threshold.')
|
| 65 |
+
self.parser.add_argument('--nms_thresh', type=float, default=0.3,
|
| 66 |
+
help='nms threshold.')
|
| 67 |
+
self.parser.add_argument('--corner_thresh', type=float, default=0.3,
|
| 68 |
+
help='threshold for corner.')
|
| 69 |
+
self.parser.add_argument('--debugger_theme', default='white',
|
| 70 |
+
choices=['white', 'black'])
|
| 71 |
+
|
| 72 |
+
# model
|
| 73 |
+
self.parser.add_argument('--arch', default='dla_34',
|
| 74 |
+
help='model architecture. Currently tested'
|
| 75 |
+
'res_18 | res_101 | resdcn_18 | resdcn_101 |'
|
| 76 |
+
'dlav0_34 | dla_34 | hourglass')
|
| 77 |
+
self.parser.add_argument('--head_conv', type=int, default=-1,
|
| 78 |
+
help='conv layer channels for output head'
|
| 79 |
+
'0 for no conv layer'
|
| 80 |
+
'-1 for default setting: '
|
| 81 |
+
'64 for resnets and 256 for dla.')
|
| 82 |
+
self.parser.add_argument('--down_ratio', type=int, default=4,
|
| 83 |
+
help='output stride. Currently only supports 4.')
|
| 84 |
+
|
| 85 |
+
# input
|
| 86 |
+
self.parser.add_argument('--input_res', type=int, default=-1,
|
| 87 |
+
help='input height and width. -1 for default from '
|
| 88 |
+
'dataset. Will be overriden by input_h | input_w')
|
| 89 |
+
self.parser.add_argument('--input_h', type=int, default=-1,
|
| 90 |
+
help='input height. -1 for default from dataset.')
|
| 91 |
+
self.parser.add_argument('--input_w', type=int, default=-1,
|
| 92 |
+
help='input width. -1 for default from dataset.')
|
| 93 |
+
|
| 94 |
+
# train
|
| 95 |
+
self.parser.add_argument('--lr', type=float, default=1.25e-4,
|
| 96 |
+
help='learning rate for batch size 32.')
|
| 97 |
+
self.parser.add_argument('--lr_step', type=str, default='80',
|
| 98 |
+
help='drop learning rate by 10.')
|
| 99 |
+
self.parser.add_argument('--NotFixList', type=str, default='',
|
| 100 |
+
help='not fix layer name.')
|
| 101 |
+
self.parser.add_argument('--num_epochs', type=int, default=90,
|
| 102 |
+
help='total training epochs.')
|
| 103 |
+
self.parser.add_argument('--batch_size', type=int, default=32,
|
| 104 |
+
help='batch size')
|
| 105 |
+
self.parser.add_argument('--master_batch_size', type=int, default=-1,
|
| 106 |
+
help='batch size on the master gpu.')
|
| 107 |
+
self.parser.add_argument('--num_iters', type=int, default=-1,
|
| 108 |
+
help='default: #samples / batch_size.')
|
| 109 |
+
self.parser.add_argument('--val_intervals', type=int, default=5,
|
| 110 |
+
help='number of epochs to run validation.')
|
| 111 |
+
self.parser.add_argument('--trainval', action='store_true',
|
| 112 |
+
help='include validation in training and test on test set')
|
| 113 |
+
self.parser.add_argument('--negative', action='store_true',
|
| 114 |
+
help='flip data augmentation.')
|
| 115 |
+
self.parser.add_argument('--adamW', action='store_true',
|
| 116 |
+
help='using adamW or adam.')
|
| 117 |
+
|
| 118 |
+
# test
|
| 119 |
+
self.parser.add_argument('--save_dir', default="default", type=str,
|
| 120 |
+
help='The path of output data.')
|
| 121 |
+
self.parser.add_argument('--flip_test', action='store_true',
|
| 122 |
+
help='flip data augmentation.')
|
| 123 |
+
self.parser.add_argument('--test_scales', type=str, default='1',
|
| 124 |
+
help='multi scale test augmentation.')
|
| 125 |
+
self.parser.add_argument('--nms', action='store_false',
|
| 126 |
+
help='run nms in testing.')
|
| 127 |
+
self.parser.add_argument('--K', type=int, default=100,
|
| 128 |
+
help='max number of output objects.')
|
| 129 |
+
self.parser.add_argument('--fix_res', action='store_true',
|
| 130 |
+
help='fix testing resolution or keep the original resolution')
|
| 131 |
+
self.parser.add_argument('--keep_res', action='store_true',
|
| 132 |
+
help='keep the original resolution during validation.')
|
| 133 |
+
|
| 134 |
+
# dataset
|
| 135 |
+
self.parser.add_argument('--not_rand_crop', action='store_true',
|
| 136 |
+
help='not use the random crop data augmentation from CornerNet.')
|
| 137 |
+
self.parser.add_argument('--shift', type=float, default=0.1,
|
| 138 |
+
help='when not using random crop apply shift augmentation.')
|
| 139 |
+
self.parser.add_argument('--scale', type=float, default=0.4,
|
| 140 |
+
help='when not using random crop apply scale augmentation.')
|
| 141 |
+
self.parser.add_argument('--rotate', type=float, default=0,
|
| 142 |
+
help='when not using random crop apply rotation augmentation.')
|
| 143 |
+
self.parser.add_argument('--flip', type=float, default=0.5,
|
| 144 |
+
help='probability of applying flip augmentation.')
|
| 145 |
+
self.parser.add_argument('--maskvisual', type=float, default=0.,
|
| 146 |
+
help='probability of masking image.')
|
| 147 |
+
self.parser.add_argument('--maskgrid', type=float, default=0.,
|
| 148 |
+
help='probability of masking grid, only available when visual is not masked.')
|
| 149 |
+
self.parser.add_argument('--no_color_aug', action='store_true',
|
| 150 |
+
help='not use the color augmenation from CornerNet')
|
| 151 |
+
self.parser.add_argument('--MK', default=500,
|
| 152 |
+
help='max corner number')
|
| 153 |
+
self.parser.add_argument('--rot', action='store_false',
|
| 154 |
+
help='rotate image')
|
| 155 |
+
self.parser.add_argument('--warp', action='store_false',
|
| 156 |
+
help='warp image')
|
| 157 |
+
self.parser.add_argument('--normal_padding', action='store_false',
|
| 158 |
+
help='normal_padding image')
|
| 159 |
+
self.parser.add_argument('--extra_channel', action='store_true',
|
| 160 |
+
help='concat edge channel to the input image')
|
| 161 |
+
self.parser.add_argument('--init_emb', type=str, default='',
|
| 162 |
+
help='embedding layer.')
|
| 163 |
+
self.parser.add_argument('--grid_type', type=str, default='char_point',
|
| 164 |
+
help='type of grid, candidates: char_point, char_box (CharGrid), line (WordGrid).')
|
| 165 |
+
self.parser.add_argument('--finetune_emb', action='store_true',
|
| 166 |
+
help='embedding finetune')
|
| 167 |
+
self.parser.add_argument('--dic', type=str, default='',
|
| 168 |
+
help='dic file for grid.')
|
| 169 |
+
self.parser.add_argument('--sample_limit', type=int, default=-1,
|
| 170 |
+
help='limit samples for training')
|
| 171 |
+
|
| 172 |
+
# multi_pose
|
| 173 |
+
self.parser.add_argument('--aug_rot', type=float, default=0,
|
| 174 |
+
help='probability of applying rotation augmentation.')
|
| 175 |
+
# ddd
|
| 176 |
+
self.parser.add_argument('--aug_ddd', type=float, default=0.5,
|
| 177 |
+
help='probability of applying crop augmentation.')
|
| 178 |
+
self.parser.add_argument('--rect_mask', action='store_true',
|
| 179 |
+
help='for ignored object, apply mask on the '
|
| 180 |
+
'rectangular region or just center point.')
|
| 181 |
+
self.parser.add_argument('--kitti_split', default='3dop',
|
| 182 |
+
help='different validation split for kitti: '
|
| 183 |
+
'3dop | subcnn')
|
| 184 |
+
|
| 185 |
+
# loss
|
| 186 |
+
self.parser.add_argument('--mse_loss', action='store_true',
|
| 187 |
+
help='use mse loss or focal loss to train keypoint heatmaps.')
|
| 188 |
+
# ctdet
|
| 189 |
+
self.parser.add_argument('--num_classes', type=int, default=-1,
|
| 190 |
+
help='the number of main category. -1 means use default from dataset.')
|
| 191 |
+
self.parser.add_argument('--num_secondary_classes', type=int, default=-1,
|
| 192 |
+
help='the number of secondary category. -1 means use default from dataset.')
|
| 193 |
+
self.parser.add_argument('--reg_loss', default='l1',
|
| 194 |
+
help='regression loss: sl1 | l1 | l2')
|
| 195 |
+
self.parser.add_argument('--hm_weight', type=float, default=1,
|
| 196 |
+
help='loss weight for keypoint heatmaps.')
|
| 197 |
+
self.parser.add_argument('--cls_weight', type=float, default=1,
|
| 198 |
+
help='loss weight for keypoint heatmaps.')
|
| 199 |
+
self.parser.add_argument('--ftype_weight', type=float, default=1,
|
| 200 |
+
help='loss weight for keypoint heatmaps.')
|
| 201 |
+
self.parser.add_argument('--mk_weight', type=float, default=1,
|
| 202 |
+
help='loss weight for corner keypoint heatmaps.')
|
| 203 |
+
self.parser.add_argument('--off_weight', type=float, default=1,
|
| 204 |
+
help='loss weight for keypoint local offsets.')
|
| 205 |
+
self.parser.add_argument('--wh_weight', type=float, default=1,
|
| 206 |
+
help='loss weight for bounding box size.')
|
| 207 |
+
# multi_pose
|
| 208 |
+
self.parser.add_argument('--hp_weight', type=float, default=1,
|
| 209 |
+
help='loss weight for human pose offset.')
|
| 210 |
+
self.parser.add_argument('--hm_hp_weight', type=float, default=1,
|
| 211 |
+
help='loss weight for human keypoint heatmap.')
|
| 212 |
+
# ddd
|
| 213 |
+
self.parser.add_argument('--dep_weight', type=float, default=1,
|
| 214 |
+
help='loss weight for depth.')
|
| 215 |
+
self.parser.add_argument('--dim_weight', type=float, default=1,
|
| 216 |
+
help='loss weight for 3d bounding box size.')
|
| 217 |
+
self.parser.add_argument('--rot_weight', type=float, default=1,
|
| 218 |
+
help='loss weight for orientation.')
|
| 219 |
+
self.parser.add_argument('--peak_thresh', type=float, default=0.1)
|
| 220 |
+
|
| 221 |
+
# task
|
| 222 |
+
# ctdet
|
| 223 |
+
self.parser.add_argument('--norm_wh', action='store_true',
|
| 224 |
+
help='L1(\hat(y) / y, 1) or L1(\hat(y), y)')
|
| 225 |
+
self.parser.add_argument('--dense_wh', action='store_true',
|
| 226 |
+
help='apply weighted regression near center or '
|
| 227 |
+
'just apply regression on center point.')
|
| 228 |
+
self.parser.add_argument('--cat_spec_wh', action='store_true',
|
| 229 |
+
help='category specific bounding box size.')
|
| 230 |
+
self.parser.add_argument('--not_reg_offset', action='store_true',
|
| 231 |
+
help='not regress local offset.')
|
| 232 |
+
# exdet
|
| 233 |
+
self.parser.add_argument('--agnostic_ex', action='store_true',
|
| 234 |
+
help='use category agnostic extreme points.')
|
| 235 |
+
self.parser.add_argument('--scores_thresh', type=float, default=0.3,
|
| 236 |
+
help='threshold for extreme point heatmap.')
|
| 237 |
+
self.parser.add_argument('--center_thresh', type=float, default=0.3,
|
| 238 |
+
help='threshold for centermap.')
|
| 239 |
+
self.parser.add_argument('--aggr_weight', type=float, default=0.0,
|
| 240 |
+
help='edge aggregation weight.')
|
| 241 |
+
# multi_pose
|
| 242 |
+
self.parser.add_argument('--dense_hp', action='store_true',
|
| 243 |
+
help='apply weighted pose regression near center '
|
| 244 |
+
'or just apply regression on center point.')
|
| 245 |
+
self.parser.add_argument('--not_hm_hp', action='store_true',
|
| 246 |
+
help='not estimate human joint heatmap, '
|
| 247 |
+
'directly use the joint offset from center.')
|
| 248 |
+
self.parser.add_argument('--not_reg_hp_offset', action='store_true',
|
| 249 |
+
help='not regress local offset for '
|
| 250 |
+
'human joint heatmaps.')
|
| 251 |
+
self.parser.add_argument('--not_reg_bbox', action='store_true',
|
| 252 |
+
help='not regression bounding box size.')
|
| 253 |
+
|
| 254 |
+
# ground truth validation
|
| 255 |
+
self.parser.add_argument('--eval_oracle_hm', action='store_true',
|
| 256 |
+
help='use ground center heatmap.')
|
| 257 |
+
self.parser.add_argument('--eval_oracle_mk', action='store_true',
|
| 258 |
+
help='use ground corner heatmap.')
|
| 259 |
+
self.parser.add_argument('--eval_oracle_wh', action='store_true',
|
| 260 |
+
help='use ground truth bounding box size.')
|
| 261 |
+
self.parser.add_argument('--eval_oracle_offset', action='store_true',
|
| 262 |
+
help='use ground truth local heatmap offset.')
|
| 263 |
+
self.parser.add_argument('--eval_oracle_kps', action='store_true',
|
| 264 |
+
help='use ground truth human pose offset.')
|
| 265 |
+
self.parser.add_argument('--eval_oracle_hmhp', action='store_true',
|
| 266 |
+
help='use ground truth human joint heatmaps.')
|
| 267 |
+
self.parser.add_argument('--eval_oracle_hp_offset', action='store_true',
|
| 268 |
+
help='use ground truth human joint local offset.')
|
| 269 |
+
self.parser.add_argument('--eval_oracle_dep', action='store_true',
|
| 270 |
+
help='use ground truth depth.')
|
| 271 |
+
|
| 272 |
+
def parse(self, args=None):
|
| 273 |
+
if isinstance(args, dict):
|
| 274 |
+
task_name = args.get("task", "ctdet")
|
| 275 |
+
opt = self.parser.parse_args(args=[task_name])
|
| 276 |
+
opt.__dict__.update(args)
|
| 277 |
+
else:
|
| 278 |
+
opt = self.parser.parse_args(args=args)
|
| 279 |
+
|
| 280 |
+
# import json
|
| 281 |
+
# with open("task_config.json", "w") as f:
|
| 282 |
+
# json.dump(opt.__dict__, f, ensure_ascii=False, indent=4)
|
| 283 |
+
|
| 284 |
+
opt.gpus_str = opt.gpus
|
| 285 |
+
opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
|
| 286 |
+
opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >= 0 else [-1]
|
| 287 |
+
opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
|
| 288 |
+
opt.test_scales = [float(i) for i in opt.test_scales.split(',')]
|
| 289 |
+
|
| 290 |
+
opt.fix_res = not opt.keep_res
|
| 291 |
+
print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')
|
| 292 |
+
opt.reg_offset = not opt.not_reg_offset
|
| 293 |
+
opt.reg_bbox = not opt.not_reg_bbox
|
| 294 |
+
opt.hm_hp = not opt.not_hm_hp
|
| 295 |
+
opt.reg_hp_offset = (not opt.not_reg_hp_offset) and opt.hm_hp
|
| 296 |
+
|
| 297 |
+
if opt.head_conv == -1: # init default head_conv
|
| 298 |
+
opt.head_conv = 256 if 'dla' in opt.arch else 64
|
| 299 |
+
opt.pad = 0 # opt.pad = 127 if 'hourglass' in opt.arch else 31
|
| 300 |
+
opt.num_stacks = 2 if opt.arch == 'hourglass' else 1
|
| 301 |
+
|
| 302 |
+
if opt.trainval:
|
| 303 |
+
opt.val_intervals = 100000000
|
| 304 |
+
|
| 305 |
+
if opt.debug > 0:
|
| 306 |
+
opt.num_workers = 0
|
| 307 |
+
opt.batch_size = 1
|
| 308 |
+
opt.gpus = [opt.gpus[0]]
|
| 309 |
+
opt.master_batch_size = -1
|
| 310 |
+
|
| 311 |
+
if opt.master_batch_size == -1:
|
| 312 |
+
opt.master_batch_size = opt.batch_size // len(opt.gpus)
|
| 313 |
+
rest_batch_size = (opt.batch_size - opt.master_batch_size)
|
| 314 |
+
opt.chunk_sizes = [opt.master_batch_size]
|
| 315 |
+
for i in range(len(opt.gpus) - 1):
|
| 316 |
+
slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
|
| 317 |
+
if i < rest_batch_size % (len(opt.gpus) - 1):
|
| 318 |
+
slave_chunk_size += 1
|
| 319 |
+
opt.chunk_sizes.append(slave_chunk_size)
|
| 320 |
+
print('training chunk_sizes:', opt.chunk_sizes)
|
| 321 |
+
|
| 322 |
+
opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
|
| 323 |
+
opt.data_dir = os.path.join(opt.root_dir, 'data') if opt.data_src == "default" else opt.data_src
|
| 324 |
+
opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
|
| 325 |
+
# import pdb; pdb.set_trace()
|
| 326 |
+
opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id) if opt.save_dir == "default" else os.path.join(opt.save_dir, opt.exp_id)
|
| 327 |
+
opt.debug_dir = os.path.join(opt.save_dir, 'debug')
|
| 328 |
+
print('The output will be saved to ', opt.save_dir)
|
| 329 |
+
|
| 330 |
+
if opt.resume and opt.load_model == '':
|
| 331 |
+
model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') \
|
| 332 |
+
else opt.save_dir
|
| 333 |
+
opt.load_model = os.path.join(model_path, 'model_last.pth')
|
| 334 |
+
return opt
|
| 335 |
+
|
| 336 |
+
def update_dataset_info_and_set_heads(self, opt, dataset):
    """Fill dataset-dependent fields on ``opt`` and build the per-task head spec.

    Resolution priority: opt.input_h/w > opt.input_res > dataset default.
    ``opt`` is mutated in place and also returned.
    """
    input_h, input_w = dataset.default_resolution
    opt.mean, opt.std = dataset.mean, dataset.std

    # -1 means "take the value from the dataset"
    if opt.num_classes == -1:
        opt.num_classes = dataset.num_classes
    if opt.num_secondary_classes == -1:
        opt.num_secondary_classes = dataset.num_secondary_classes

    # input_h(w): opt.input_h overrides opt.input_res overrides dataset default
    input_h = opt.input_res if opt.input_res > 0 else input_h
    input_w = opt.input_res if opt.input_res > 0 else input_w
    opt.input_h = opt.input_h if opt.input_h > 0 else input_h
    opt.input_w = opt.input_w if opt.input_w > 0 else input_w
    opt.output_h = opt.input_h // opt.down_ratio
    opt.output_w = opt.input_w // opt.down_ratio
    opt.input_res = max(opt.input_h, opt.input_w)
    opt.output_res = max(opt.output_h, opt.output_w)

    if opt.task == 'exdet':
        num_hm = 1 if opt.agnostic_ex else opt.num_classes
        opt.heads = {'hm_t': num_hm, 'hm_l': num_hm,
                     'hm_b': num_hm, 'hm_r': num_hm,
                     'hm_c': opt.num_classes}
        if opt.reg_offset:
            opt.heads.update({'reg_t': 2, 'reg_l': 2, 'reg_b': 2, 'reg_r': 2})
    elif opt.task == 'ddd':
        opt.heads = {'hm': opt.num_classes, 'dep': 1, 'rot': 8, 'dim': 3}
        if opt.reg_bbox:
            opt.heads.update({'wh': 2})
        if opt.reg_offset:
            opt.heads.update({'reg': 2})
    elif opt.task in ('ctdet', 'ctdet_dualmodal'):
        # The two ctdet variants share an identical head layout; merged here
        # from two copy-pasted branches.
        opt.heads = {'hm': opt.num_classes, 'cls': 4, 'ftype': opt.num_secondary_classes,
                     'wh': 8 if not opt.cat_spec_wh else 8 * opt.num_classes}
        if opt.reg_offset:
            opt.heads.update({'reg': 2})
    elif opt.task == 'multi_pose':
        opt.flip_idx = dataset.flip_idx
        opt.heads = {'hm': opt.num_classes, 'wh': 2, 'hps': 34}
        if opt.reg_offset:
            opt.heads.update({'reg': 2})
        if opt.hm_hp:
            opt.heads.update({'hm_hp': 17})
        if opt.reg_hp_offset:
            opt.heads.update({'hp_offset': 2})
    elif opt.task == 'ctdet_subfield':
        # The last two classes are handled by the dedicated "*_sub" heads.
        opt.heads = {'hm': opt.num_classes - 2, 'cls': 4, 'ftype': opt.num_secondary_classes,
                     'wh': 8 if not opt.cat_spec_wh else 8 * opt.num_classes,
                     'hm_sub': 2, 'wh_sub': 8}
        if opt.reg_offset:
            opt.heads.update({'reg': 2})
            opt.heads.update({'reg_sub': 2})
    else:
        assert 0, 'task not defined!'
    print('heads', opt.heads)
    return opt
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
if __name__ == '__main__':
    # Smoke test: build an option object from a plain dict of overrides.
    print("Testing config ... ")
    overrides = {"batch_size": 32, "dataset": "huntie"}
    parsed = opts().parse(args=overrides)
    print(parsed.__dict__)
|
pix2text/doc_xl_layout/utils/__init__.py
ADDED
|
File without changes
|
pix2text/doc_xl_layout/utils/ddd_utils.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import print_function
|
| 4 |
+
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def compute_box_3d(dim, location, rotation_y):
    """Return the 8 corners (8 x 3) of a 3D box in camera coordinates.

    dim is (h, w, l), location the bottom-center of the box, rotation_y the
    yaw around the camera Y axis.
    """
    cos_r, sin_r = np.cos(rotation_y), np.sin(rotation_y)
    rot = np.array([[cos_r, 0, sin_r], [0, 1, 0], [-sin_r, 0, cos_r]], dtype=np.float32)
    h, w, l = dim[0], dim[1], dim[2]
    # corners in the box's local frame: y=0 is the bottom face, -h the top
    xs = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
    ys = [0, 0, 0, 0, -h, -h, -h, -h]
    zs = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    local = np.array([xs, ys, zs], dtype=np.float32)
    world = np.dot(rot, local) + np.array(location, dtype=np.float32).reshape(3, 1)
    return world.transpose(1, 0)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def project_to_image(pts_3d, P):
    """Project n x 3 camera-space points through the 3 x 4 matrix P to n x 2 pixels."""
    ones = np.ones((pts_3d.shape[0], 1), dtype=np.float32)
    homo = np.concatenate([pts_3d, ones], axis=1)
    projected = homo.dot(P.T)
    # perspective divide by the third (depth) coordinate
    return projected[:, :2] / projected[:, 2:]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def compute_orientation_3d(dim, location, rotation_y):
    """Return a 2 x 3 heading arrow (tail at location, tip dim[2] ahead)."""
    cos_r, sin_r = np.cos(rotation_y), np.sin(rotation_y)
    rot = np.array([[cos_r, 0, sin_r], [0, 1, 0], [-sin_r, 0, cos_r]], dtype=np.float32)
    arrow = np.array([[0, dim[2]], [0, 0], [0, 0]], dtype=np.float32)
    arrow = np.dot(rot, arrow) + np.array(location, dtype=np.float32).reshape(3, 1)
    return arrow.transpose(1, 0)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def draw_box_3d(image, corners, c=(0, 0, 255)):
    """Draw the edges of a projected 3D box; the front face gets an X marker."""
    face_idx = [[0, 1, 5, 4],
                [1, 2, 6, 5],
                [2, 3, 7, 6],
                [3, 0, 4, 7]]
    for ind_f in range(3, -1, -1):
        f = face_idx[ind_f]
        for j in range(4):
            start = (corners[f[j], 0], corners[f[j], 1])
            end = (corners[f[(j + 1) % 4], 0], corners[f[(j + 1) % 4], 1])
            cv2.line(image, start, end, c, 2, lineType=cv2.LINE_AA)
        if ind_f == 0:
            # cross the front face's diagonals so the heading is visible
            cv2.line(image, (corners[f[0], 0], corners[f[0], 1]),
                     (corners[f[2], 0], corners[f[2], 1]), c, 1, lineType=cv2.LINE_AA)
            cv2.line(image, (corners[f[1], 0], corners[f[1], 1]),
                     (corners[f[3], 0], corners[f[3], 1]), c, 1, lineType=cv2.LINE_AA)
    return image
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def unproject_2d_to_3d(pt_2d, depth, P):
    """Invert the pinhole projection: pixel (2,) + depth + 3 x 4 P -> camera point (3,)."""
    z = depth - P[2, 3]
    x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0]
    y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1]
    return np.array([x, y, z], dtype=np.float32)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def alpha2rot_y(alpha, x, cx, fx):
    """Convert observation angle ``alpha`` to global yaw ``rotation_y``.

    alpha : observation angle of the object, in [-pi, pi]
    x     : object center x in pixels; cx / fx: principal point / focal length
    Returns the yaw around the camera Y axis, wrapped into [-pi, pi].
    """
    rot_y = alpha + np.arctan2(x - cx, fx)
    # one wrap step is enough here: |alpha| <= pi and |arctan2(., fx)| < pi/2
    if rot_y > np.pi:
        rot_y -= 2 * np.pi
    elif rot_y < -np.pi:
        rot_y += 2 * np.pi
    return rot_y
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def rot_y2alpha(rot_y, x, cx, fx):
    """Convert global yaw ``rotation_y`` back to observation angle ``alpha``.

    Inverse of alpha2rot_y; the result is wrapped into [-pi, pi].
    """
    alpha = rot_y - np.arctan2(x - cx, fx)
    if alpha > np.pi:
        alpha -= 2 * np.pi
    elif alpha < -np.pi:
        alpha += 2 * np.pi
    return alpha
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def ddd2locrot(center, alpha, dim, depth, calib):
    """Recover 3D location and yaw from a 2D center, alpha, box dims and depth."""
    rotation_y = alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0])
    locations = unproject_2d_to_3d(center, depth, calib)
    # shift from the box center down to the bottom face (KITTI convention)
    locations[1] += dim[0] / 2
    return locations, rotation_y
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def project_3d_bbox(location, dim, rotation_y, calib):
    """Project a 3D box (location, dim, yaw) to its 8 image-plane corner pixels."""
    return project_to_image(compute_box_3d(dim, location, rotation_y), calib)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
if __name__ == '__main__':
    # Sanity check of the alpha <-> rotation_y conversion on a KITTI calib.
    calib = np.array(
        [[7.070493000000e+02, 0.000000000000e+00, 6.040814000000e+02, 4.575831000000e+01],
         [0.000000000000e+00, 7.070493000000e+02, 1.805066000000e+02, -3.454157000000e-01],
         [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 4.981016000000e-03]],
        dtype=np.float32)
    alpha = -0.20
    top_left = np.array([712.40, 143.00], dtype=np.float32)
    bottom_right = np.array([810.73, 307.92], dtype=np.float32)
    center = (top_left + bottom_right) / 2
    rotation_y = 0.01
    print('alpha2rot_y', alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0]))
    print('rotation_y', rotation_y)
|
pix2text/doc_xl_layout/utils/debugger.py
ADDED
|
@@ -0,0 +1,606 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import print_function
|
| 4 |
+
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d
|
| 9 |
+
|
| 10 |
+
class Debugger(object):
|
| 11 |
+
def __init__(self, ipynb=False, theme='black',
             num_classes=-1, dataset=None, down_ratio=4):
    """Visual-debugging helper: stores named image canvases and draws on them.

    ipynb: if True, images are rendered with matplotlib instead of cv2 windows.
    theme: 'black' or 'white'; 'white' reverses and darkens the palette.
    num_classes / dataset: select the class-name table (branches below).
    down_ratio: feature-map-to-input stride used to scale heatmap coordinates.
    """
    self.ipynb = ipynb
    if not self.ipynb:
        import matplotlib.pyplot as plt
        self.plt = plt
    self.imgs = {}  # img_id -> canvas (np.ndarray)
    self.theme = theme
    # color_list and the *_class_name tables are module-level constants
    # defined elsewhere in this file.
    colors = [(color_list[_]).astype(np.uint8) \
              for _ in range(len(color_list))]
    self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)
    if self.theme == 'white':
        # reverse and darken the palette so boxes stay visible on white
        self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3)
        self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8)
    self.dim_scale = 1
    if dataset == 'coco_hp':
        self.names = ['p']
        self.num_class = 1
        self.num_joints = 17
        # skeleton edges between the 17 COCO keypoints
        self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
                      [3, 5], [4, 6], [5, 6],
                      [5, 7], [7, 9], [6, 8], [8, 10],
                      [5, 11], [6, 12], [11, 12],
                      [11, 13], [13, 15], [12, 14], [14, 16]]
        # per-edge BGR colors (matches self.edges order)
        self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
                   (255, 0, 0), (0, 0, 255), (255, 0, 255),
                   (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255),
                   (255, 0, 0), (0, 0, 255), (255, 0, 255),
                   (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)]
        # per-joint BGR colors
        self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255),
                          (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
                          (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
                          (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
                          (255, 0, 0), (0, 0, 255)]
    elif num_classes == 80 or dataset == 'coco':
        self.names = coco_class_name
    elif num_classes == 20 or dataset == 'pascal':
        self.names = pascal_class_name
    elif num_classes == 1 and dataset == 'table':
        self.names = table_class_name
    elif num_classes == 16 or dataset == 'huntie':
        self.names = huntie_class_name
    elif dataset == 'vehicle':
        self.names = vehicle_class_name
    elif num_classes == 2 or dataset == 'video':
        self.names = video_class_name
    elif dataset == 'gta':
        # camera intrinsics / frame size used by the 3D visualizations
        self.names = gta_class_name
        self.focal_length = 935.3074360871937
        self.W = 1920
        self.H = 1080
        self.dim_scale = 3
    elif dataset == 'viper':
        self.names = gta_class_name
        self.focal_length = 1158
        self.W = 1920
        self.H = 1080
        self.dim_scale = 3
    elif num_classes == 3 or dataset == 'kitti':
        self.names = kitti_class_name
        self.focal_length = 721.5377
        self.W = 1242
        self.H = 375
    # num_classes = len(self.names)
    self.down_ratio = down_ratio
    # for bird view
    self.world_size = 64
    self.out_size = 384
|
| 79 |
+
|
| 80 |
+
def add_img(self, img, img_id='default', revert_color=False):
    """Store a copy of ``img`` under ``img_id``; optionally invert its colors."""
    canvas = (255 - img) if revert_color else img
    self.imgs[img_id] = canvas.copy()
|
| 84 |
+
|
| 85 |
+
def add_mask(self, mask, bg, imgId='default', trans=0.8):
    """Blend a {0,1} mask (scaled to 255) over ``bg`` and store it under ``imgId``."""
    fg = mask.reshape(mask.shape[0], mask.shape[1], 1) * 255 * trans
    self.imgs[imgId] = (fg + bg * (1 - trans)).astype(np.uint8)
|
| 89 |
+
|
| 90 |
+
def show_img(self, pause=False, imgId='default'):
    """Show one stored image in a cv2 window; block for a key press if ``pause``."""
    cv2.imshow('{}'.format(imgId), self.imgs[imgId])
    if pause:
        cv2.waitKey()
|
| 94 |
+
|
| 95 |
+
def add_blend_img(self, back, fore, img_id='blend', trans=0.7):
    """Alpha-blend ``fore`` over ``back`` and store the clamped uint8 result.

    ``fore`` is resized to ``back``'s size when the sizes differ and a
    single-channel ``fore`` is reshaped so it broadcasts across channels.
    """
    if self.theme == 'white':
        fore = 255 - fore
    # Bug fix: the original compared fore.shape[0] against back.shape[1], so a
    # non-square fore of the correct size was needlessly resized and a width
    # mismatch on same-height images went undetected. Compare H and W properly.
    if fore.shape[0] != back.shape[0] or fore.shape[1] != back.shape[1]:
        fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
    if len(fore.shape) == 2:
        fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
    blended = back * (1. - trans) + fore * trans
    blended[blended > 255] = 255
    blended[blended < 0] = 0
    self.imgs[img_id] = blended.astype(np.uint8).copy()
|
| 106 |
+
|
| 107 |
+
'''
|
| 108 |
+
# slow version
|
| 109 |
+
def gen_colormap(self, img, output_res=None):
|
| 110 |
+
# num_classes = len(self.colors)
|
| 111 |
+
img[img < 0] = 0
|
| 112 |
+
h, w = img.shape[1], img.shape[2]
|
| 113 |
+
if output_res is None:
|
| 114 |
+
output_res = (h * self.down_ratio, w * self.down_ratio)
|
| 115 |
+
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
|
| 116 |
+
for i in range(img.shape[0]):
|
| 117 |
+
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
|
| 118 |
+
resized = resized.reshape(output_res[0], output_res[1], 1)
|
| 119 |
+
cl = self.colors[i] if not (self.theme == 'white') \
|
| 120 |
+
else 255 - self.colors[i]
|
| 121 |
+
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
|
| 122 |
+
return color_map
|
| 123 |
+
'''
|
| 124 |
+
|
| 125 |
+
def gen_colormap(self, img, output_res=None):
    """Render a (C, H, W) heatmap as a colored image at the output resolution.

    Each channel is tinted with its class color; where channels overlap the
    per-pixel maximum wins. output_res defaults to (H, W) * down_ratio.
    """
    img = img.copy()
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    if output_res is None:
        output_res = (h * self.down_ratio, w * self.down_ratio)
    img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
    colors = np.array(
        self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
    if self.theme == 'white':
        colors = 255 - colors
    color_map = (img * colors).max(axis=2).astype(np.uint8)
    # Bug fix: cv2.resize takes dsize as (width, height) while output_res is
    # (height, width); the axes were swapped for non-square maps.
    color_map = cv2.resize(color_map, (output_res[1], output_res[0]))
    return color_map
|
| 138 |
+
|
| 139 |
+
'''
|
| 140 |
+
# slow
|
| 141 |
+
def gen_colormap_hp(self, img, output_res=None):
|
| 142 |
+
# num_classes = len(self.colors)
|
| 143 |
+
# img[img < 0] = 0
|
| 144 |
+
h, w = img.shape[1], img.shape[2]
|
| 145 |
+
if output_res is None:
|
| 146 |
+
output_res = (h * self.down_ratio, w * self.down_ratio)
|
| 147 |
+
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
|
| 148 |
+
for i in range(img.shape[0]):
|
| 149 |
+
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
|
| 150 |
+
resized = resized.reshape(output_res[0], output_res[1], 1)
|
| 151 |
+
cl = self.colors_hp[i] if not (self.theme == 'white') else \
|
| 152 |
+
(255 - np.array(self.colors_hp[i]))
|
| 153 |
+
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
|
| 154 |
+
return color_map
|
| 155 |
+
'''
|
| 156 |
+
|
| 157 |
+
def gen_colormap_hp(self, img, output_res=None):
    """Render a (C, H, W) keypoint heatmap using the per-joint palette.

    Same scheme as gen_colormap but tinted with self.colors_hp.
    """
    c, h, w = img.shape[0], img.shape[1], img.shape[2]
    if output_res is None:
        output_res = (h * self.down_ratio, w * self.down_ratio)
    img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
    colors = np.array(
        self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
    if self.theme == 'white':
        colors = 255 - colors
    color_map = (img * colors).max(axis=2).astype(np.uint8)
    # Bug fix: cv2.resize takes dsize as (width, height) while output_res is
    # (height, width); the axes were swapped for non-square maps.
    color_map = cv2.resize(color_map, (output_res[1], output_res[0]))
    return color_map
|
| 169 |
+
|
| 170 |
+
def add_rect(self, rect1, rect2, c, conf=1, img_id='default'):
    """Draw a rectangle between two corner points; for conf < 1 also draw a
    small confidence-scaled circle at each of the four corners."""
    canvas = self.imgs[img_id]
    cv2.rectangle(canvas, (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2)
    if conf < 1:
        radius = int(10 * conf)
        cv2.circle(canvas, (rect1[0], rect1[1]), radius, c, 1)
        cv2.circle(canvas, (rect2[0], rect2[1]), radius, c, 1)
        cv2.circle(canvas, (rect1[0], rect2[1]), radius, c, 1)
        cv2.circle(canvas, (rect2[0], rect1[1]), radius, c, 1)
|
| 178 |
+
|
| 179 |
+
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=False, img_id='default'):
    """Draw an axis-aligned box (x1, y1, x2, y2) with an optional 'cat conf' label."""
    bbox = np.array(bbox, dtype=np.int32)
    # cat = (int(cat) + 1) % 80
    cat = int(cat)
    # print('cat', cat, self.names[cat])
    c = self.colors[cat][0][0].tolist()
    if self.theme == 'white':
        c = (255 - np.array(c)).tolist()
    # txt = '{}{:.1f}'.format(self.names[cat], conf)
    txt = '{}{:.1f}'.format(cat, conf)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
    cv2.rectangle(
        self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 1)
    if show_txt:
        # filled background strip behind the label, then the label itself
        cv2.rectangle(self.imgs[img_id],
                      (bbox[0], bbox[1] - cat_size[1] - 2),
                      (bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
        cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2),
                    font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
|
| 199 |
+
|
| 200 |
+
def add_4ps_coco_bbox(self, bbox, cat, conf=1, show_txt=False, img_id='default'):
    """Draw a 4-point (possibly rotated) box from 8 coordinates.

    bbox layout: (x0, y0, x1, y1, x2, y2, x3, y3, ...); the last two entries
    of ``bbox`` are appended to the label text.
    """
    bbox = np.array(bbox, dtype=np.int32)
    # cat = (int(cat) + 1) % 80
    cat = int(cat)
    c = self.colors[cat][0][0].tolist()
    if self.theme == 'white':
        c = (255 - np.array(c)).tolist()
    txt = '{}_{:.1f}_{}_{}'.format(str(cat), conf, bbox[-2], bbox[-1])
    font = cv2.FONT_HERSHEY_SIMPLEX
    cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
    # one line per edge, each a distinct color so vertex order is visible
    cv2.line(self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    cv2.line(self.imgs[img_id], (bbox[2], bbox[3]), (bbox[4], bbox[5]), (0, 255, 0), 2)
    cv2.line(self.imgs[img_id], (bbox[4], bbox[5]), (bbox[6], bbox[7]), (255, 0, 0), 2)
    cv2.line(self.imgs[img_id], (bbox[6], bbox[7]), (bbox[0], bbox[1]), (0, 255, 255), 2)
    # cv2.rectangle(
    #     self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0,0,255), 1)
    if show_txt:
        # cv2.rectangle(self.imgs[img_id],
        #               (bbox[0], bbox[1] - cat_size[1] - 2),
        #               (bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
        cv2.putText(self.imgs[img_id], txt, (int((bbox[0] + bbox[6]) / 2), int((bbox[1] + bbox[7]) / 2)),
                    font, 1, (0, 0, 255), thickness=1, lineType=cv2.LINE_AA)
|
| 222 |
+
|
| 223 |
+
def add_coco_hp(self, points, img_id='default'):
    """Draw the COCO keypoints and their skeleton edges.

    Edges touching a non-positive (i.e. missing) point are skipped.
    """
    points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
    for j in range(self.num_joints):
        cv2.circle(self.imgs[img_id],
                   (points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1)
    for j, e in enumerate(self.edges):
        if points[e].min() > 0:
            cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]),
                     (points[e[1], 0], points[e[1], 1]), self.ec[j], 2,
                     lineType=cv2.LINE_AA)
|
| 233 |
+
|
| 234 |
+
def add_points(self, points, img_id='default'):
    """Draw per-class center points (heatmap coordinates, scaled by down_ratio)."""
    num_classes = len(points)
    # assert num_classes == len(self.colors)
    for i in range(num_classes):
        for j in range(len(points[i])):
            c = self.colors[i, 0, 0]
            # white halo first, then the class-colored dot on top
            cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
                                           points[i][j][1] * self.down_ratio),
                       5, (255, 255, 255), -1)
            cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
                                           points[i][j][1] * self.down_ratio),
                       3, (int(c[0]), int(c[1]), int(c[2])), -1)
|
| 246 |
+
|
| 247 |
+
def add_corner(self, corner, img_id='default'):
    """Mark one corner point with a circle and its class index as text.

    corner: (x, y, cls); classes 0-3 get distinct text colors.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    cls = int(corner[2])
    # Bug fix: the original left ``rgb`` unbound for cls outside 0..3, which
    # raised NameError at putText. Unknown classes now fall back to white.
    palette = {0: (0, 0, 255), 1: (0, 255, 0), 2: (255, 0, 0), 3: (0, 0, 0)}
    rgb = palette.get(cls, (255, 255, 255))
    cv2.circle(self.imgs[img_id], (int(corner[0]), int(corner[1])), 3, (255, 0, 0), 2)
    cv2.putText(self.imgs[img_id], str(cls), (int(corner[0]) - 5, int(corner[1]) - 5), font, 0.5, rgb, thickness=1,
                lineType=cv2.LINE_AA)
|
| 261 |
+
|
| 262 |
+
def show_all_imgs(self, pause=False, time=0):
    """Display every stored image: cv2 windows normally, matplotlib in ipynb mode.

    In the cv2 path, pressing ESC (key code 27) exits the process.
    ``time`` is accepted for API compatibility but unused.
    """
    if not self.ipynb:
        for i, v in self.imgs.items():
            cv2.imshow('{}'.format(i), v)
        if cv2.waitKey(0 if pause else 1) == 27:
            import sys
            sys.exit(0)
    else:
        self.ax = None
        nImgs = len(self.imgs)
        fig = self.plt.figure(figsize=(nImgs * 10, 10))
        nCols = nImgs
        nRows = nImgs // nCols
        for i, (k, v) in enumerate(self.imgs.items()):
            fig.add_subplot(1, nImgs, i + 1)
            if len(v.shape) == 3:
                # cv2 canvases are BGR; matplotlib expects RGB
                self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))
            else:
                self.plt.imshow(v)
        self.plt.show()
|
| 282 |
+
|
| 283 |
+
def save_img(self, imgId='default', path='./cache/debug/'):
    """Write one stored image to ``<path><imgId>.png``."""
    cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])
|
| 285 |
+
|
| 286 |
+
def save_all_imgs(self, image_name, path='./cache/debug/', prefix='', genID=False):
    """Write the stored images into ``path``.

    When ``genID`` is set, a monotonically increasing id is read from and
    written back to ``<path>/id.txt`` and placed in ``prefix``.

    NOTE(review): every entry in self.imgs is written to the same
    ``image_name``, so later entries overwrite earlier ones — looks
    intentional for single-image debugging; verify against callers.
    """
    if genID:
        try:
            idx = int(np.loadtxt(path + '/id.txt'))
        except (OSError, ValueError):
            # Bug fix: narrowed from a bare ``except:`` — only a missing or
            # malformed id file should reset the counter.
            idx = 0
        prefix = idx
        np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d')
    for i, v in self.imgs.items():
        # cv2.imwrite(path + '/{}{}.png'.format(prefix,i), v)
        cv2.imwrite(path + '/%s' % image_name, v)
|
| 299 |
+
|
| 300 |
+
def remove_side(self, img_id, img):
    """Crop the stored image ``img_id`` to the non-zero bounding box of ``img``.

    ``img`` is H x W x C; rows/columns whose pixel sum is zero are trimmed
    from each side of self.imgs[img_id]. No-op when ``img_id`` is unknown.
    """
    if not (img_id in self.imgs):
        return
    ws = img.sum(axis=2).sum(axis=0)  # per-column mass
    l = 0
    # Bug fix: the bounds check must precede the indexing — the original
    # tested ws[l] first and raised IndexError on an all-zero image.
    while l < len(ws) and ws[l] == 0:
        l += 1
    r = ws.shape[0] - 1
    while r > 0 and ws[r] == 0:
        r -= 1
    hs = img.sum(axis=2).sum(axis=1)  # per-row mass
    t = 0
    while t < len(hs) and hs[t] == 0:
        t += 1
    b = hs.shape[0] - 1
    while b > 0 and hs[b] == 0:
        b -= 1
    self.imgs[img_id] = self.imgs[img_id][t:b + 1, l:r + 1].copy()
|
| 318 |
+
|
| 319 |
+
def project_3d_to_bird(self, pt):
    """Map a ground-plane point (meters; mutated in place) to bird-view pixels."""
    pt[0] += self.world_size / 2
    pt[1] = self.world_size - pt[1]
    scaled = pt * self.out_size / self.world_size
    return scaled.astype(np.int32)
|
| 324 |
+
|
| 325 |
+
def add_ct_detection(
        self, img, dets, show_box=False, show_txt=True,
        center_thresh=0.5, img_id='det'):
    """Draw center-point detections over a copy of ``img``.

    ``dets`` is either a dict {cat: array[n, >=5]} already in image coords,
    or a flat array [n, ...] in heatmap coords (scaled by down_ratio here).
    Only entries whose score (column 2) exceeds ``center_thresh`` are drawn.
    """
    # dets: max_preds x 5
    self.imgs[img_id] = img.copy()
    if type(dets) == type({}):
        for cat in dets:
            for i in range(len(dets[cat])):
                if dets[cat][i, 2] > center_thresh:
                    cl = (self.colors[cat, 0, 0]).tolist()
                    ct = dets[cat][i, :2].astype(np.int32)
                    if show_box:
                        # last two columns are width/height; cols 0-1 the center
                        w, h = dets[cat][i, -2], dets[cat][i, -1]
                        x, y = dets[cat][i, 0], dets[cat][i, 1]
                        bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
                                        dtype=np.float32)
                        self.add_coco_bbox(
                            bbox, cat - 1, dets[cat][i, 2],
                            show_txt=show_txt, img_id=img_id)
    else:
        for i in range(len(dets)):
            if dets[i, 2] > center_thresh:
                # print('dets', dets[i])
                cat = int(dets[i, -1])
                cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \
                      255 - self.colors[cat, 0, 0]).tolist()
                # heatmap coords -> image coords
                ct = dets[i, :2].astype(np.int32) * self.down_ratio
                cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1)
                if show_box:
                    w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio
                    x, y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio
                    bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
                                    dtype=np.float32)
                    self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id)
|
| 359 |
+
|
| 360 |
+
def add_3d_detection(
        self, image_or_path, dets, calib, show_txt=False,
        center_thresh=0.5, img_id='det'):
    """Project per-class 3D detections onto an image and draw the box edges.

    ``dets[cat]`` rows carry dims at cols 5:8, location at 8:11, yaw at 11 and
    the score in the last column; boxes too close to / behind the camera
    (loc z <= 1) are skipped.
    """
    if isinstance(image_or_path, np.ndarray):
        self.imgs[img_id] = image_or_path
    else:
        self.imgs[img_id] = cv2.imread(image_or_path)
    for cat in dets:
        for i in range(len(dets[cat])):
            cl = (self.colors[cat - 1, 0, 0]).tolist()
            if dets[cat][i, -1] > center_thresh:
                dim = dets[cat][i, 5:8]
                loc = dets[cat][i, 8:11]
                rot_y = dets[cat][i, 11]
                # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
                # dim = dim / self.dim_scale
                if loc[2] > 1:
                    box_3d = compute_box_3d(dim, loc, rot_y)
                    box_2d = project_to_image(box_3d, calib)
                    self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
|
| 380 |
+
|
| 381 |
+
def compose_vis_add(
|
| 382 |
+
self, img_path, dets, calib,
|
| 383 |
+
center_thresh, pred, bev, img_id='out'):
|
| 384 |
+
self.imgs[img_id] = cv2.imread(img_path)
|
| 385 |
+
# h, w = self.imgs[img_id].shape[:2]
|
| 386 |
+
# pred = cv2.resize(pred, (h, w))
|
| 387 |
+
h, w = pred.shape[:2]
|
| 388 |
+
hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w
|
| 389 |
+
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
|
| 390 |
+
self.add_blend_img(self.imgs[img_id], pred, img_id)
|
| 391 |
+
for cat in dets:
|
| 392 |
+
for i in range(len(dets[cat])):
|
| 393 |
+
cl = (self.colors[cat - 1, 0, 0]).tolist()
|
| 394 |
+
if dets[cat][i, -1] > center_thresh:
|
| 395 |
+
dim = dets[cat][i, 5:8]
|
| 396 |
+
loc = dets[cat][i, 8:11]
|
| 397 |
+
rot_y = dets[cat][i, 11]
|
| 398 |
+
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
|
| 399 |
+
# dim = dim / self.dim_scale
|
| 400 |
+
if loc[2] > 1:
|
| 401 |
+
box_3d = compute_box_3d(dim, loc, rot_y)
|
| 402 |
+
box_2d = project_to_image(box_3d, calib)
|
| 403 |
+
box_2d[:, 0] /= hs
|
| 404 |
+
box_2d[:, 1] /= ws
|
| 405 |
+
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
|
| 406 |
+
self.imgs[img_id] = np.concatenate(
|
| 407 |
+
[self.imgs[img_id], self.imgs[bev]], axis=1)
|
| 408 |
+
|
| 409 |
+
def add_2d_detection(
|
| 410 |
+
self, img, dets, show_box=False, show_txt=True,
|
| 411 |
+
center_thresh=0.5, img_id='det'):
|
| 412 |
+
self.imgs[img_id] = img
|
| 413 |
+
for cat in dets:
|
| 414 |
+
for i in range(len(dets[cat])):
|
| 415 |
+
cl = (self.colors[cat - 1, 0, 0]).tolist()
|
| 416 |
+
if dets[cat][i, -1] > center_thresh:
|
| 417 |
+
bbox = dets[cat][i, 1:5]
|
| 418 |
+
self.add_coco_bbox(
|
| 419 |
+
bbox, cat - 1, dets[cat][i, -1],
|
| 420 |
+
show_txt=show_txt, img_id=img_id)
|
| 421 |
+
|
| 422 |
+
def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'):
|
| 423 |
+
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
|
| 424 |
+
for cat in dets:
|
| 425 |
+
cl = (self.colors[cat - 1, 0, 0]).tolist()
|
| 426 |
+
lc = (250, 152, 12)
|
| 427 |
+
for i in range(len(dets[cat])):
|
| 428 |
+
if dets[cat][i, -1] > center_thresh:
|
| 429 |
+
dim = dets[cat][i, 5:8]
|
| 430 |
+
loc = dets[cat][i, 8:11]
|
| 431 |
+
rot_y = dets[cat][i, 11]
|
| 432 |
+
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
|
| 433 |
+
for k in range(4):
|
| 434 |
+
rect[k] = self.project_3d_to_bird(rect[k])
|
| 435 |
+
# cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1)
|
| 436 |
+
cv2.polylines(
|
| 437 |
+
bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)],
|
| 438 |
+
True, lc, 2, lineType=cv2.LINE_AA)
|
| 439 |
+
for e in [[0, 1]]:
|
| 440 |
+
t = 4 if e == [0, 1] else 1
|
| 441 |
+
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
|
| 442 |
+
(rect[e[1]][0], rect[e[1]][1]), lc, t,
|
| 443 |
+
lineType=cv2.LINE_AA)
|
| 444 |
+
self.imgs[img_id] = bird_view
|
| 445 |
+
|
| 446 |
+
def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'):
|
| 447 |
+
alpha = 0.5
|
| 448 |
+
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
|
| 449 |
+
for ii, (dets, lc, cc) in enumerate(
|
| 450 |
+
[(dets_gt, (12, 49, 250), (0, 0, 255)),
|
| 451 |
+
(dets_dt, (250, 152, 12), (255, 0, 0))]):
|
| 452 |
+
# cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3)
|
| 453 |
+
for cat in dets:
|
| 454 |
+
cl = (self.colors[cat - 1, 0, 0]).tolist()
|
| 455 |
+
for i in range(len(dets[cat])):
|
| 456 |
+
if dets[cat][i, -1] > center_thresh:
|
| 457 |
+
dim = dets[cat][i, 5:8]
|
| 458 |
+
loc = dets[cat][i, 8:11]
|
| 459 |
+
rot_y = dets[cat][i, 11]
|
| 460 |
+
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
|
| 461 |
+
for k in range(4):
|
| 462 |
+
rect[k] = self.project_3d_to_bird(rect[k])
|
| 463 |
+
if ii == 0:
|
| 464 |
+
cv2.fillPoly(
|
| 465 |
+
bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)],
|
| 466 |
+
lc, lineType=cv2.LINE_AA)
|
| 467 |
+
else:
|
| 468 |
+
cv2.polylines(
|
| 469 |
+
bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)],
|
| 470 |
+
True, lc, 2, lineType=cv2.LINE_AA)
|
| 471 |
+
# for e in [[0, 1], [1, 2], [2, 3], [3, 0]]:
|
| 472 |
+
for e in [[0, 1]]:
|
| 473 |
+
t = 4 if e == [0, 1] else 1
|
| 474 |
+
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
|
| 475 |
+
(rect[e[1]][0], rect[e[1]][1]), lc, t,
|
| 476 |
+
lineType=cv2.LINE_AA)
|
| 477 |
+
self.imgs[img_id] = bird_view
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
kitti_class_name = [
|
| 481 |
+
'p', 'v', 'b'
|
| 482 |
+
]
|
| 483 |
+
|
| 484 |
+
gta_class_name = [
|
| 485 |
+
'p', 'v'
|
| 486 |
+
]
|
| 487 |
+
|
| 488 |
+
pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
|
| 489 |
+
"car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
|
| 490 |
+
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
|
| 491 |
+
|
| 492 |
+
coco_class_name = [
|
| 493 |
+
'person', 'bicycle', 'car', 'motorcycle', 'airplane',
|
| 494 |
+
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
|
| 495 |
+
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
|
| 496 |
+
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
|
| 497 |
+
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
|
| 498 |
+
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
|
| 499 |
+
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
|
| 500 |
+
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
|
| 501 |
+
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
|
| 502 |
+
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
|
| 503 |
+
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
|
| 504 |
+
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
|
| 505 |
+
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
|
| 506 |
+
]
|
| 507 |
+
|
| 508 |
+
table_class_name = ["box"]
|
| 509 |
+
|
| 510 |
+
huntie_class_name = ['hcp', 'fjxcd', 'czcfp', 'defp', 'zzsfp',
|
| 511 |
+
'qtfp', 'sfz_front', 'sfz_back', 'xsz_first', 'xsz_second',
|
| 512 |
+
'bank_card', 'jsz_first', 'roll_ticket', 'czr', 'huzhu', 'FedEx',
|
| 513 |
+
'birth_certification', 'blicence', 'car_invoice', 'estate', 'food_blicence',
|
| 514 |
+
'food_plicence', "jsz_first", "passport_china", "permit_china",
|
| 515 |
+
"permit_china_miner", "house_cert", "book_blicense", "medical_license",
|
| 516 |
+
"medical_instrument_license"]
|
| 517 |
+
|
| 518 |
+
video_class_name = ['phone_contract', 'phone_signature']
|
| 519 |
+
|
| 520 |
+
vehicle_class_name = ["first", "second"]
|
| 521 |
+
|
| 522 |
+
color_list = np.array(
|
| 523 |
+
[
|
| 524 |
+
1.000, 1.000, 1.000,
|
| 525 |
+
0.850, 0.325, 0.098,
|
| 526 |
+
0.929, 0.694, 0.125,
|
| 527 |
+
0.494, 0.184, 0.556,
|
| 528 |
+
0.466, 0.674, 0.188,
|
| 529 |
+
0.301, 0.745, 0.933,
|
| 530 |
+
0.635, 0.078, 0.184,
|
| 531 |
+
0.300, 0.300, 0.300,
|
| 532 |
+
0.600, 0.600, 0.600,
|
| 533 |
+
1.000, 0.000, 0.000,
|
| 534 |
+
1.000, 0.500, 0.000,
|
| 535 |
+
0.749, 0.749, 0.000,
|
| 536 |
+
0.000, 1.000, 0.000,
|
| 537 |
+
0.000, 0.000, 1.000,
|
| 538 |
+
0.667, 0.000, 1.000,
|
| 539 |
+
0.333, 0.333, 0.000,
|
| 540 |
+
0.333, 0.667, 0.000,
|
| 541 |
+
0.333, 1.000, 0.000,
|
| 542 |
+
0.667, 0.333, 0.000,
|
| 543 |
+
0.667, 0.667, 0.000,
|
| 544 |
+
0.667, 1.000, 0.000,
|
| 545 |
+
1.000, 0.333, 0.000,
|
| 546 |
+
1.000, 0.667, 0.000,
|
| 547 |
+
1.000, 1.000, 0.000,
|
| 548 |
+
0.000, 0.333, 0.500,
|
| 549 |
+
0.000, 0.667, 0.500,
|
| 550 |
+
0.000, 1.000, 0.500,
|
| 551 |
+
0.333, 0.000, 0.500,
|
| 552 |
+
0.333, 0.333, 0.500,
|
| 553 |
+
0.333, 0.667, 0.500,
|
| 554 |
+
0.333, 1.000, 0.500,
|
| 555 |
+
0.667, 0.000, 0.500,
|
| 556 |
+
0.667, 0.333, 0.500,
|
| 557 |
+
0.667, 0.667, 0.500,
|
| 558 |
+
0.667, 1.000, 0.500,
|
| 559 |
+
1.000, 0.000, 0.500,
|
| 560 |
+
1.000, 0.333, 0.500,
|
| 561 |
+
1.000, 0.667, 0.500,
|
| 562 |
+
1.000, 1.000, 0.500,
|
| 563 |
+
0.000, 0.333, 1.000,
|
| 564 |
+
0.000, 0.667, 1.000,
|
| 565 |
+
0.000, 1.000, 1.000,
|
| 566 |
+
0.333, 0.000, 1.000,
|
| 567 |
+
0.333, 0.333, 1.000,
|
| 568 |
+
0.333, 0.667, 1.000,
|
| 569 |
+
0.333, 1.000, 1.000,
|
| 570 |
+
0.667, 0.000, 1.000,
|
| 571 |
+
0.667, 0.333, 1.000,
|
| 572 |
+
0.667, 0.667, 1.000,
|
| 573 |
+
0.667, 1.000, 1.000,
|
| 574 |
+
1.000, 0.000, 1.000,
|
| 575 |
+
1.000, 0.333, 1.000,
|
| 576 |
+
1.000, 0.667, 1.000,
|
| 577 |
+
0.167, 0.000, 0.000,
|
| 578 |
+
0.333, 0.000, 0.000,
|
| 579 |
+
0.500, 0.000, 0.000,
|
| 580 |
+
0.667, 0.000, 0.000,
|
| 581 |
+
0.833, 0.000, 0.000,
|
| 582 |
+
1.000, 0.000, 0.000,
|
| 583 |
+
0.000, 0.167, 0.000,
|
| 584 |
+
0.000, 0.333, 0.000,
|
| 585 |
+
0.000, 0.500, 0.000,
|
| 586 |
+
0.000, 0.667, 0.000,
|
| 587 |
+
0.000, 0.833, 0.000,
|
| 588 |
+
0.000, 1.000, 0.000,
|
| 589 |
+
0.000, 0.000, 0.167,
|
| 590 |
+
0.000, 0.000, 0.333,
|
| 591 |
+
0.000, 0.000, 0.500,
|
| 592 |
+
0.000, 0.000, 0.667,
|
| 593 |
+
0.000, 0.000, 0.833,
|
| 594 |
+
0.000, 0.000, 1.000,
|
| 595 |
+
0.000, 0.000, 0.000,
|
| 596 |
+
0.143, 0.143, 0.143,
|
| 597 |
+
0.286, 0.286, 0.286,
|
| 598 |
+
0.429, 0.429, 0.429,
|
| 599 |
+
0.571, 0.571, 0.571,
|
| 600 |
+
0.714, 0.714, 0.714,
|
| 601 |
+
0.857, 0.857, 0.857,
|
| 602 |
+
0.000, 0.447, 0.741,
|
| 603 |
+
0.50, 0.5, 0
|
| 604 |
+
]
|
| 605 |
+
).astype(np.float32)
|
| 606 |
+
color_list = color_list.reshape((-1, 3)) * 255
|
pix2text/doc_xl_layout/utils/evaluation_bk.py
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
from shapely.geometry import Polygon
|
| 8 |
+
from tabulate import tabulate
|
| 9 |
+
import time
|
| 10 |
+
|
| 11 |
+
def visual_badcase(image_name, pred_list, label_list, output_dir="visual_badcase", info=None, prefix=''):
|
| 12 |
+
"""
|
| 13 |
+
"""
|
| 14 |
+
image_name = image_name + '.jpg'
|
| 15 |
+
if not os.path.exists(output_dir):
|
| 16 |
+
os.makedirs(output_dir)
|
| 17 |
+
|
| 18 |
+
image_dir = os.path.abspath('../../data/huntie/test_images/')
|
| 19 |
+
image_path = os.path.join(image_dir, image_name)
|
| 20 |
+
img = cv2.imread(image_path)
|
| 21 |
+
if img is None:
|
| 22 |
+
print("--> Warning: skip, given image dir NOT exists: {}".format(image_path))
|
| 23 |
+
return None
|
| 24 |
+
|
| 25 |
+
font = cv2.FONT_HERSHEY_SIMPLEX
|
| 26 |
+
img = cv2.imread(image_path)
|
| 27 |
+
for label in label_list:
|
| 28 |
+
points, class_id = label[:8], label[8]
|
| 29 |
+
pts = np.array(points).reshape((1, -1, 2)).astype(np.int32)
|
| 30 |
+
cv2.polylines(img, pts, isClosed=True, color=(0, 255, 0), thickness=3)
|
| 31 |
+
cv2.putText(img, "gt:" + str(class_id), tuple(pts[0][0].tolist()), font, 1, (0, 255, 0), 2)
|
| 32 |
+
|
| 33 |
+
for label in pred_list:
|
| 34 |
+
points, class_id = label[:8], label[8]
|
| 35 |
+
pts = np.array(points).reshape((1, -1, 2)).astype(np.int32)
|
| 36 |
+
cv2.polylines(img, pts, isClosed=True, color=(255, 0, 0), thickness=3)
|
| 37 |
+
cv2.putText(img, "pred:" + str(class_id), tuple(pts[0][-1].tolist()), font, 1, (255, 0, 0), 2)
|
| 38 |
+
|
| 39 |
+
if info is not None:
|
| 40 |
+
cv2.putText(img, str(info), (40, 40), font, 1, (0, 0, 255), 2)
|
| 41 |
+
output_path = os.path.join(output_dir, prefix + os.path.basename(image_path))
|
| 42 |
+
print("--> info: visualizing badcase: {}".format(output_path))
|
| 43 |
+
cv2.imwrite(output_path, img)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def load_gt_from_json(json_path):
|
| 47 |
+
"""
|
| 48 |
+
"""
|
| 49 |
+
with open(json_path) as f:
|
| 50 |
+
gt_info = json.load(f)
|
| 51 |
+
gt_image_list = gt_info["images"]
|
| 52 |
+
gt_anno_list = gt_info["annotations"]
|
| 53 |
+
|
| 54 |
+
id_to_image_info = {}
|
| 55 |
+
for image_item in gt_image_list:
|
| 56 |
+
id_to_image_info[image_item['id']] = {
|
| 57 |
+
"file_name": image_item['file_name'],
|
| 58 |
+
"group_name": image_item.get("group_name", "huntie")
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
group_info = {}
|
| 62 |
+
for annotation_item in gt_anno_list:
|
| 63 |
+
image_info = id_to_image_info[annotation_item['image_id']]
|
| 64 |
+
image_name, group_name = image_info["file_name"], image_info["group_name"]
|
| 65 |
+
|
| 66 |
+
if group_name not in group_info:
|
| 67 |
+
group_info[group_name] = {}
|
| 68 |
+
if image_name not in group_info[group_name]:
|
| 69 |
+
group_info[group_name][image_name] = []
|
| 70 |
+
anno_info = {
|
| 71 |
+
"category_id": annotation_item["category_id"],
|
| 72 |
+
"poly": annotation_item["poly"],
|
| 73 |
+
"secondary_id": annotation_item.get("secondary_id", -1),
|
| 74 |
+
"direction_id": annotation_item.get("direction_id", -1)
|
| 75 |
+
}
|
| 76 |
+
group_info[group_name][image_name].append(anno_info)
|
| 77 |
+
|
| 78 |
+
group_info_str = ", ".join(["{}[{}]".format(k, len(v)) for k, v in group_info.items()])
|
| 79 |
+
print("--> load {} groups: {}".format(len(group_info.keys()), group_info_str))
|
| 80 |
+
return group_info
|
| 81 |
+
|
| 82 |
+
def save_res_to_file(table_head, table_body_sorted):
|
| 83 |
+
with open('val_out.txt', 'a') as fout:
|
| 84 |
+
fout.write(time.strftime('%Y-%m-%d-%H-%M') + '\n')
|
| 85 |
+
fout.write('\t'.join(table_head) + '\n')
|
| 86 |
+
for line in table_body_sorted:
|
| 87 |
+
new_line = []
|
| 88 |
+
for ele in line:
|
| 89 |
+
if isinstance(ele, int):
|
| 90 |
+
new_line.append('{:d}'.format(ele))
|
| 91 |
+
elif isinstance(ele, float):
|
| 92 |
+
new_line.append('{:.6f}'.format(ele))
|
| 93 |
+
elif isinstance(ele, str):
|
| 94 |
+
new_line.append(ele)
|
| 95 |
+
fout.write('\t'.join(new_line) + '\n')
|
| 96 |
+
|
| 97 |
+
def calc_iou(label, detect):
|
| 98 |
+
label_box = []
|
| 99 |
+
detect_box = []
|
| 100 |
+
|
| 101 |
+
d_area = []
|
| 102 |
+
for i in range(0, len(detect)):
|
| 103 |
+
pred_poly = detect[i]["poly"]
|
| 104 |
+
box_det = []
|
| 105 |
+
for k in range(0, 4):
|
| 106 |
+
box_det.append([pred_poly[2 * k], pred_poly[2 * k + 1]])
|
| 107 |
+
detect_box.append(box_det)
|
| 108 |
+
try:
|
| 109 |
+
poly = Polygon(box_det)
|
| 110 |
+
d_area.append(poly.area)
|
| 111 |
+
except:
|
| 112 |
+
print('invalid detects', pred_poly)
|
| 113 |
+
exit(-1)
|
| 114 |
+
|
| 115 |
+
l_area = []
|
| 116 |
+
for i in range(0, len(label)):
|
| 117 |
+
gt_poly = label[i]["poly"]
|
| 118 |
+
box_gt = []
|
| 119 |
+
for k in range(4):
|
| 120 |
+
box_gt.append([gt_poly[2 * k], gt_poly[2 * k + 1]])
|
| 121 |
+
label_box.append(box_gt)
|
| 122 |
+
try:
|
| 123 |
+
poly = Polygon(box_gt)
|
| 124 |
+
l_area.append(poly.area)
|
| 125 |
+
except:
|
| 126 |
+
print('invalid detects', gt_poly)
|
| 127 |
+
exit(-1)
|
| 128 |
+
|
| 129 |
+
ol_areas = []
|
| 130 |
+
for i in range(0, len(detect_box)):
|
| 131 |
+
ol_areas.append([])
|
| 132 |
+
poly1 = Polygon(detect_box[i])
|
| 133 |
+
for j in range(0, len(label_box)):
|
| 134 |
+
poly2 = Polygon(label_box[j])
|
| 135 |
+
try:
|
| 136 |
+
ol_area = poly2.intersection(poly1).area
|
| 137 |
+
except:
|
| 138 |
+
print('invaild pair', detect_box[i], label_box[j])
|
| 139 |
+
ol_areas[i].append(0.0)
|
| 140 |
+
else:
|
| 141 |
+
ol_areas[i].append(ol_area)
|
| 142 |
+
|
| 143 |
+
d_ious = [0.0] * len(detect_box)
|
| 144 |
+
l_ious = [0.0] * len(label_box)
|
| 145 |
+
det2label_idx = [-1] * len(detect_box) # 每个检测框iou最大标注框的index
|
| 146 |
+
for i in range(0, len(detect_box)):
|
| 147 |
+
for j in range(0, len(label_box)):
|
| 148 |
+
if int(label[j]["category_id"]) == int(detect[i]["category_id"]):
|
| 149 |
+
# iou = min(ol_areas[i][j] / (d_area[i] + 1e-10), ol_areas[i][j] / (l_area[j] + 1e-10))
|
| 150 |
+
iou = ol_areas[i][j] / (d_area[i] + l_area[j] - ol_areas[i][j] + 1e-10)
|
| 151 |
+
else:
|
| 152 |
+
iou = 0
|
| 153 |
+
det2label_idx[i] = j if iou > d_ious[i] else det2label_idx[i]
|
| 154 |
+
d_ious[i] = max(d_ious[i], iou)
|
| 155 |
+
l_ious[j] = max(l_ious[j], iou)
|
| 156 |
+
return l_ious, d_ious, det2label_idx
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def eval(instance_info):
|
| 160 |
+
img_name, label_info = instance_info
|
| 161 |
+
label = label_info['gt']
|
| 162 |
+
detect = label_info['det']
|
| 163 |
+
l_ious, d_ious, det2label_idx = calc_iou(label, detect)
|
| 164 |
+
return [img_name, d_ious, l_ious, detect, label, det2label_idx]
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def static_with_class(rets, iou_thresh=0.7, is_verbose=True, map_info=None):
|
| 168 |
+
if is_verbose:
|
| 169 |
+
table_head = ['Class_id', 'Class_name', 'Pre_hit', 'Pre_num', 'GT_hit', 'GT_num', 'Precision', 'Recall', 'F-score', 'All_recalled', 'Img_num', 'Acc.']
|
| 170 |
+
else:
|
| 171 |
+
table_head = ['Class_id', 'Class_name', 'Precision', 'Recall', 'F-score']
|
| 172 |
+
table_body = []
|
| 173 |
+
class_dict = {}
|
| 174 |
+
all_dict = {} # 用以统计合计结果
|
| 175 |
+
all_dict['dm'] = 0
|
| 176 |
+
all_dict['dv'] = 0
|
| 177 |
+
all_dict['lm'] = 0
|
| 178 |
+
all_dict['lv'] = 0
|
| 179 |
+
all_dict['Img_num'] = 0
|
| 180 |
+
all_dict['All_recalled'] = 0
|
| 181 |
+
|
| 182 |
+
no_need_keys = ['group_name', 'poly', 'score' , 'category_id']
|
| 183 |
+
# import pdb; pdb.set_trace()
|
| 184 |
+
extra_keys = [_ for _ in rets[0][4][0].keys() if _ not in no_need_keys]
|
| 185 |
+
# extra_table_heads = [[_] for _ in rets[0][4][0].keys() if _ not in no_need_keys]
|
| 186 |
+
extra_table_heads = {}
|
| 187 |
+
extra_dict = {}
|
| 188 |
+
extra_table_body = {}
|
| 189 |
+
for key in extra_keys:
|
| 190 |
+
extra_table_heads[key] = [key, 'Name', 'Pre_hit', 'Pre_num', 'GT_hit', 'GT_num', 'Precision', 'Recall', 'F-score']
|
| 191 |
+
extra_dict[key] = {}
|
| 192 |
+
extra_table_body[key] = []
|
| 193 |
+
# _ += ['Pre_hit', 'Pre_num', 'GT_hit', 'GT_num', 'Precision', 'Recall', 'F-score']
|
| 194 |
+
|
| 195 |
+
# pdb.set_trace()
|
| 196 |
+
for i in range(len(rets)):
|
| 197 |
+
img_name, d_ious, l_ious, detects, labels, det2label_idx = rets[i]
|
| 198 |
+
item_lv, item_dv, item_dm, item_lm = 0, 0, 0, 0
|
| 199 |
+
current_dict = {}
|
| 200 |
+
|
| 201 |
+
for label in labels:
|
| 202 |
+
item_lv += 1
|
| 203 |
+
category_id = label["category_id"]
|
| 204 |
+
if category_id not in class_dict:
|
| 205 |
+
class_dict[category_id] = {}
|
| 206 |
+
class_dict[category_id]['dm'] = 0
|
| 207 |
+
class_dict[category_id]['dv'] = 0
|
| 208 |
+
class_dict[category_id]['lm'] = 0
|
| 209 |
+
class_dict[category_id]['lv'] = 0
|
| 210 |
+
class_dict[category_id]['Img_num'] = 0
|
| 211 |
+
class_dict[category_id]['All_recalled'] = 0
|
| 212 |
+
class_dict[category_id]['lv'] += 1
|
| 213 |
+
|
| 214 |
+
category_container = []
|
| 215 |
+
for label in labels:
|
| 216 |
+
if label['category_id'] not in category_container:
|
| 217 |
+
category_container.append(label['category_id'])
|
| 218 |
+
for category_id in category_container:
|
| 219 |
+
class_dict[category_id]['Img_num'] += 1
|
| 220 |
+
current_dict[category_id] = {'dm':0, 'dv':0, 'lm':0, 'lv':0, 'Img_num':0, 'All_recalled':0}
|
| 221 |
+
# 统计各额外key的id list和label、detect中检出的量
|
| 222 |
+
for key in extra_keys:
|
| 223 |
+
for label in labels:
|
| 224 |
+
if label[key] not in extra_dict[key] and label[key] != -1:
|
| 225 |
+
extra_dict[key][label[key]] = {'dm':0, 'dv':0, 'lm':0, 'lv':0}
|
| 226 |
+
for det in detects:
|
| 227 |
+
if det[key] not in extra_dict[key] and det[key] != -1:
|
| 228 |
+
extra_dict[key][det[key]] = {'dm':0, 'dv':0, 'lm':0, 'lv':0}
|
| 229 |
+
for label in labels:
|
| 230 |
+
if label[key] != -1:
|
| 231 |
+
extra_dict[key][label[key]]['lv'] += 1
|
| 232 |
+
for det in detects:
|
| 233 |
+
if det[key] != -1:
|
| 234 |
+
try:
|
| 235 |
+
extra_dict[key][det[key]]['dv'] += 1
|
| 236 |
+
except:
|
| 237 |
+
import pdb; pdb.set_trace()
|
| 238 |
+
|
| 239 |
+
for label in labels:
|
| 240 |
+
current_dict[label['category_id']]['lv'] += 1
|
| 241 |
+
for det in detects:
|
| 242 |
+
current_dict[label['category_id']]['dv'] += 1
|
| 243 |
+
|
| 244 |
+
for det in detects:
|
| 245 |
+
item_dv += 1
|
| 246 |
+
category_id = det["category_id"]
|
| 247 |
+
if category_id not in class_dict:
|
| 248 |
+
print("--> category_id not exists in gt: {}".format(category_id))
|
| 249 |
+
continue
|
| 250 |
+
class_dict[category_id]['dv'] += 1
|
| 251 |
+
|
| 252 |
+
for idx, iou in enumerate(d_ious):
|
| 253 |
+
if iou >= iou_thresh:
|
| 254 |
+
item_dm += 1
|
| 255 |
+
class_dict[detects[idx]["category_id"]]['dm'] += 1
|
| 256 |
+
current_dict[detects[idx]["category_id"]]['dm'] += 1
|
| 257 |
+
|
| 258 |
+
for key in extra_keys:
|
| 259 |
+
if labels[det2label_idx[idx]][key] != -1 and detects[idx][key] == labels[det2label_idx[idx]][key]:
|
| 260 |
+
extra_dict[key][detects[idx][key]]['dm'] += 1
|
| 261 |
+
extra_dict[key][detects[idx][key]]['lm'] += 1
|
| 262 |
+
|
| 263 |
+
for idx, iou in enumerate(l_ious):
|
| 264 |
+
if iou >= iou_thresh:
|
| 265 |
+
item_lm += 1
|
| 266 |
+
class_dict[labels[idx]["category_id"]]['lm'] += 1
|
| 267 |
+
current_dict[labels[idx]["category_id"]]['lm'] += 1
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
# 将recall append到结果list当中
|
| 272 |
+
item_r = item_lm / (item_lv + 1e-6)
|
| 273 |
+
# item_p = item_dm / (item_dv + 1e-6)
|
| 274 |
+
# item_f = 2 * item_p * item_r / (item_p + item_r + 1e-6)
|
| 275 |
+
# if (1 - item_r) < 1e-5:
|
| 276 |
+
# class_dict[category_id]['All_recalled'] += 1
|
| 277 |
+
rets[i].append(item_r)
|
| 278 |
+
|
| 279 |
+
# 计算各个box类别全召回率
|
| 280 |
+
for category_id in category_container:
|
| 281 |
+
id_recall = current_dict[category_id]['lm'] / (current_dict[category_id]['lv'] + 1e-6)
|
| 282 |
+
if (1 - id_recall) < 1e-5:
|
| 283 |
+
class_dict[category_id]['All_recalled'] += 1
|
| 284 |
+
|
| 285 |
+
# 计算所有类别总计的全召回率
|
| 286 |
+
# import pdb; pdb.set_trace()
|
| 287 |
+
all_dict['dv'] += item_dv
|
| 288 |
+
all_dict['lv'] += item_lv
|
| 289 |
+
all_dict['dm'] += item_dm
|
| 290 |
+
all_dict['lm'] += item_lm
|
| 291 |
+
all_dict['Img_num'] += 1
|
| 292 |
+
if (1 - item_r) < 1e-5:
|
| 293 |
+
all_dict['All_recalled'] += 1
|
| 294 |
+
|
| 295 |
+
# if img_name == 'train10w_val2w_69008cd9828a455fb1bf751a95ad8921.jpg':
|
| 296 |
+
# import pdb; pdb.set_trace()
|
| 297 |
+
# if item_r
|
| 298 |
+
# if item_f < 0.97 and is_save_badcase:
|
| 299 |
+
# prefix = '_'.join(map(str, sorted(list(badcase_class_set)))) + '_'
|
| 300 |
+
# item_info = "IOU{}, {}, {}, {}".format(iou_thresh, item_r, item_p, item_f)
|
| 301 |
+
# visual_badcase(img_name, detects, labels, output_dir="visual_badcase", info=item_info, prefix=prefix)
|
| 302 |
+
|
| 303 |
+
dm, dv, lm, lv, total, recalled = 0, 0, 0, 0, 0, 0
|
| 304 |
+
map_info = {} if map_info is None else map_info
|
| 305 |
+
for key in class_dict.keys():
|
| 306 |
+
dm += class_dict[key]['dm']
|
| 307 |
+
dv += class_dict[key]['dv']
|
| 308 |
+
lm += class_dict[key]['lm']
|
| 309 |
+
lv += class_dict[key]['lv']
|
| 310 |
+
recalled += class_dict[category_id]['All_recalled']
|
| 311 |
+
total += class_dict[key]['Img_num']
|
| 312 |
+
p = class_dict[key]['dm'] / (class_dict[key]['dv'] + 1e-6)
|
| 313 |
+
r = class_dict[key]['lm'] / (class_dict[key]['lv'] + 1e-6)
|
| 314 |
+
fscore = 2 * p * r / (p + r + 1e-6)
|
| 315 |
+
acc = class_dict[key]['All_recalled'] / (class_dict[key]['Img_num'] + 1e-6)
|
| 316 |
+
if is_verbose:
|
| 317 |
+
table_body.append((key, map_info.get("primary_map", {}).get(str(key), str(key)), class_dict[key]['dm'],
|
| 318 |
+
class_dict[key]['dv'], class_dict[key]['lm'], class_dict[key]['lv'], p, r, fscore,
|
| 319 |
+
class_dict[category_id]['All_recalled'], class_dict[key]['Img_num'], acc))
|
| 320 |
+
else:
|
| 321 |
+
table_body.append((key, map_info.get(str(key), str(key)), p, r, fscore))
|
| 322 |
+
|
| 323 |
+
p = dm / (dv + 1e-6)
|
| 324 |
+
r = lm / (lv + 1e-6)
|
| 325 |
+
f = 2 * p * r / (p + r + 1e-6)
|
| 326 |
+
acc = recalled / (total + 1e-6)
|
| 327 |
+
table_body_sorted = sorted(table_body, key=lambda x: int((x[0])))
|
| 328 |
+
if is_verbose:
|
| 329 |
+
table_body_sorted.append(('IOU_{}'.format(iou_thresh), 'average', dm, dv, lm, lv, p, r, f,
|
| 330 |
+
all_dict['All_recalled'], all_dict['Img_num'], (all_dict['All_recalled']/all_dict['Img_num']+1e-6)))
|
| 331 |
+
else:
|
| 332 |
+
table_body_sorted.append(('IOU_{}'.format(iou_thresh), 'average', p, r, f))
|
| 333 |
+
# import pdb; pdb.set_trace()
|
| 334 |
+
save_res_to_file(table_head, table_body_sorted)
|
| 335 |
+
print(tabulate(table_body_sorted, headers=table_head, tablefmt='pipe'))
|
| 336 |
+
# ---------------print(extra_keys)
|
| 337 |
+
for _key in extra_dict.keys():
|
| 338 |
+
dm, dv, lm, lv = 0, 0, 0, 0
|
| 339 |
+
for key in extra_dict[_key].keys():
|
| 340 |
+
dm += extra_dict[_key][key]['dm']
|
| 341 |
+
dv += extra_dict[_key][key]['dv']
|
| 342 |
+
lm += extra_dict[_key][key]['lm']
|
| 343 |
+
lv += extra_dict[_key][key]['lv']
|
| 344 |
+
# 找当前key对应的map_info key的name
|
| 345 |
+
map_name = ''
|
| 346 |
+
for candidate_name in map_info.keys():
|
| 347 |
+
if candidate_name.split('_')[0] == _key.split('_')[0]:
|
| 348 |
+
map_name = candidate_name
|
| 349 |
+
|
| 350 |
+
precision = extra_dict[_key][key]['dm'] / (extra_dict[_key][key]['dv'] + 1e-6)
|
| 351 |
+
recall = extra_dict[_key][key]['lm'] / (extra_dict[_key][key]['lv'] + 1e-6)
|
| 352 |
+
fscore = 2 * precision * recall / (precision + recall + 1e-6)
|
| 353 |
+
if map_name == '': # 没有在map_info中找到对应类表
|
| 354 |
+
extra_table_body[_key].append((key, '', extra_dict[_key][key]['dm'], extra_dict[_key][key]['dv'],
|
| 355 |
+
extra_dict[_key][key]['lm'], extra_dict[_key][key]['lv'],
|
| 356 |
+
precision, recall, fscore))
|
| 357 |
+
else:
|
| 358 |
+
extra_table_body[_key].append((key, map_info.get(map_name, {}).get(str(key), str(key)), extra_dict[_key][key]['dm'], extra_dict[_key][key]['dv'],
|
| 359 |
+
extra_dict[_key][key]['lm'], extra_dict[_key][key]['lv'],
|
| 360 |
+
precision, recall, fscore))
|
| 361 |
+
extra_table_body[_key] = sorted(extra_table_body[_key], key=lambda x: int((x[0])))
|
| 362 |
+
p = dm / (dv + 1e-6)
|
| 363 |
+
r = lm / (lv + 1e-6)
|
| 364 |
+
f = 2 * p * r / (p + r + 1e-6)
|
| 365 |
+
extra_table_body[_key].append((key, 'average', dm, dv, lm, lv, p, r, f))
|
| 366 |
+
# import pdb; pdb.set_trace()
|
| 367 |
+
for _key in extra_keys:
|
| 368 |
+
save_res_to_file(extra_table_heads[_key], extra_table_body[_key])
|
| 369 |
+
print(tabulate(extra_table_body[_key], headers=extra_table_heads[_key], tablefmt='pipe'))
|
| 370 |
+
|
| 371 |
+
return [table_head] + table_body_sorted
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def multiproc(func, task_list, proc_num=30, retv=True, progress_bar=False):
|
| 375 |
+
from multiprocessing import Pool
|
| 376 |
+
pool = Pool(proc_num)
|
| 377 |
+
|
| 378 |
+
rets = []
|
| 379 |
+
if progress_bar:
|
| 380 |
+
import tqdm
|
| 381 |
+
with tqdm.tqdm(total=len(task_list)) as t:
|
| 382 |
+
for ret in pool.imap(func, task_list):
|
| 383 |
+
rets.append(ret)
|
| 384 |
+
t.update(1)
|
| 385 |
+
else:
|
| 386 |
+
for ret in pool.imap(func, task_list):
|
| 387 |
+
rets.append(ret)
|
| 388 |
+
|
| 389 |
+
pool.close()
|
| 390 |
+
pool.join()
|
| 391 |
+
|
| 392 |
+
if retv:
|
| 393 |
+
return rets
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def eval_and_show(label_dict, detect_dict, output_dir, iou_thresh=0.7, map_info=None):
|
| 397 |
+
"""
|
| 398 |
+
"""
|
| 399 |
+
evaluation_group_info = {}
|
| 400 |
+
for group_name, gt_info in label_dict.items():
|
| 401 |
+
group_pair_list = []
|
| 402 |
+
for file_name, value_list in gt_info.items():
|
| 403 |
+
if file_name not in detect_dict:
|
| 404 |
+
# print("--> missing pred:", file_name)
|
| 405 |
+
continue
|
| 406 |
+
group_pair_list.append([file_name, {'gt': gt_info[file_name], 'det': detect_dict[file_name]}])
|
| 407 |
+
evaluation_group_info[group_name] = group_pair_list
|
| 408 |
+
|
| 409 |
+
res_info_all = {}
|
| 410 |
+
for group_name, group_pair_list in evaluation_group_info.items():
|
| 411 |
+
print(" ------- group name: {} -----------".format(group_name))
|
| 412 |
+
rets = multiproc(eval, group_pair_list, proc_num=16)
|
| 413 |
+
# import pdb; pdb.set_trace()
|
| 414 |
+
group_name_map_info = map_info.get(group_name, None) if map_info is not None else None
|
| 415 |
+
res_info = static_with_class(rets, iou_thresh=iou_thresh, map_info=group_name_map_info)
|
| 416 |
+
res_info_all[group_name] = res_info
|
| 417 |
+
|
| 418 |
+
evaluation_res_info_path = os.path.join(output_dir, "results_val.json")
|
| 419 |
+
with open(evaluation_res_info_path, "w") as f:
|
| 420 |
+
json.dump(res_info_all, f, ensure_ascii=False, indent=4)
|
| 421 |
+
print("--> info: evaluation result is saved at {}".format(evaluation_res_info_path))
|
| 422 |
+
return rets
|
| 423 |
+
|
| 424 |
+
if __name__ == "__main__":
|
| 425 |
+
|
| 426 |
+
if len(sys.argv) != 5:
|
| 427 |
+
print("Usage: python {} gt_json_path pred_json_path output_dir iou_thresh".format(__file__))
|
| 428 |
+
exit(-1)
|
| 429 |
+
else:
|
| 430 |
+
print('--> info: {}'.format(sys.argv))
|
| 431 |
+
gt_json_path, pred_json_path, output_dir, iou_thresh = sys.argv[1], sys.argv[2], sys.argv[3], float(sys.argv[4])
|
| 432 |
+
|
| 433 |
+
label_dict = load_gt_from_json(gt_json_path)
|
| 434 |
+
with open(pred_json_path, "r") as f:
|
| 435 |
+
detect_dict = json.load(f)
|
| 436 |
+
res_info = eval_and_show(label_dict, detect_dict, output_dir, iou_thresh=iou_thresh, map_info=None)
|
| 437 |
+
|
pix2text/doc_xl_layout/utils/image.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Copyright (c) Microsoft
|
| 3 |
+
# Licensed under the MIT License.
|
| 4 |
+
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
|
| 5 |
+
# Modified by Xingyi Zhou
|
| 6 |
+
# ------------------------------------------------------------------------------
|
| 7 |
+
|
| 8 |
+
from __future__ import absolute_import
|
| 9 |
+
from __future__ import division
|
| 10 |
+
from __future__ import print_function
|
| 11 |
+
|
| 12 |
+
import random
|
| 13 |
+
|
| 14 |
+
import cv2
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def flip(img):
    """Return a copy of *img* with its last axis reversed.

    For a (H, W, C) array this reverses the C axis; for (C, H, W) it
    mirrors horizontally — callers decide the layout.
    """
    return np.flip(img, axis=2).copy()
| 21 |
+
|
| 22 |
+
def transform_preds(coords, center, scale, output_size):
    """Map predicted points from network-output space back to source-image
    space using the inverse of the crop's affine transform.

    coords: (N, 2+) array; only the first two columns are transformed.
    Returns a new float array of the same shape.
    """
    inv_trans = get_affine_transform(center, scale, 0, output_size, inv=1)
    mapped = np.zeros(coords.shape)
    for idx in range(coords.shape[0]):
        mapped[idx, 0:2] = affine_transform(coords[idx, 0:2], inv_trans)
    return mapped
| 29 |
+
|
| 30 |
+
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
    """Build the 2x3 affine matrix mapping a rotated/scaled crop around
    *center* onto an output canvas of *output_size* (w, h).

    center: (x, y) crop center in the source image.
    scale: scalar or (w, h); a bare scalar is expanded to a square crop.
    rot: rotation angle in degrees.
    shift: fractional shift of the crop, in units of *scale*.
    inv: if truthy, return the inverse mapping (output -> source).
    """
    # Accept a scalar scale for square crops.
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale], dtype=np.float32)

    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    # Direction of the crop's "up" axis after rotating by `rot` degrees.
    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    # Three non-collinear point correspondences fully determine an affine
    # map: the center, a point "above" the center, and a third point
    # produced by get_3rd_point (perpendicular completion).
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return trans
| 65 |
+
|
| 66 |
+
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix *t* to a 2-D point *pt* and return the
    transformed (x, y) pair."""
    homogeneous = np.array([pt[0], pt[1], 1.0], dtype=np.float32)
    return np.dot(t, homogeneous)[:2]
| 71 |
+
|
| 72 |
+
def get_3rd_point(a, b):
    """Return the point completing a right angle at *b*: b plus the vector
    (a - b) rotated 90 degrees."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)
| 76 |
+
|
| 77 |
+
def get_dir(src_point, rot_rad):
    """Rotate the 2-D point *src_point* by *rot_rad* radians about the
    origin and return the result as a [x, y] list."""
    sin_r = np.sin(rot_rad)
    cos_r = np.cos(rot_rad)
    return [src_point[0] * cos_r - src_point[1] * sin_r,
            src_point[0] * sin_r + src_point[1] * cos_r]
| 86 |
+
|
| 87 |
+
def crop(img, center, scale, output_size, rot=0):
    """Extract a (possibly rotated) crop around *center* of size *scale*
    from *img*, resampled to *output_size* with bilinear interpolation."""
    matrix = get_affine_transform(center, scale, rot, output_size)
    canvas = (int(output_size[0]), int(output_size[1]))
    return cv2.warpAffine(img, matrix, canvas, flags=cv2.INTER_LINEAR)
| 97 |
+
|
| 98 |
+
def gaussian_radius(det_size, min_overlap=0.7):
    """CornerNet heuristic: radius around a box corner/center such that a
    box shifted by it still overlaps the GT box by at least *min_overlap*.

    NOTE(review): like the upstream code, roots are computed as
    (b + sqrt(disc)) / 2 rather than the textbook (b + sqrt(disc)) / (2a);
    kept as-is for compatibility with models trained on these targets.
    """
    height, width = det_size
    candidates = []

    # Case 1: both corners move inward.
    b, c = height + width, width * height * (1 - min_overlap) / (1 + min_overlap)
    candidates.append((b + np.sqrt(b ** 2 - 4 * 1 * c)) / 2)

    # Case 2: both corners move outward.
    b, c = 2 * (height + width), (1 - min_overlap) * width * height
    candidates.append((b + np.sqrt(b ** 2 - 4 * 4 * c)) / 2)

    # Case 3: one corner in, one out.
    a = 4 * min_overlap
    b = -2 * min_overlap * (height + width)
    c = (min_overlap - 1) * width * height
    candidates.append((b + np.sqrt(b ** 2 - 4 * a * c)) / 2)

    return min(candidates)
| 120 |
+
|
| 121 |
+
def gaussian2D(shape, sigma=1):
    """Return an unnormalized 2-D Gaussian of the given (h, w) *shape*,
    peaking at 1 in the center; values below float eps are zeroed."""
    half_h, half_w = ((s - 1.0) / 2.0 for s in shape)
    ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]

    g = np.exp(-(xs * xs + ys * ys) / (2.0 * sigma * sigma))
    # Clamp numerically negligible tails to exactly zero.
    g[g < np.finfo(g.dtype).eps * g.max()] = 0
    return g
| 129 |
+
|
| 130 |
+
def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Splat a Gaussian of the given integer *radius* onto *heatmap* in
    place at *center* (x, y), taking the element-wise maximum with the
    existing values; *k* scales the peak. Returns the mutated heatmap.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)

    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]

    # Clip the stamp so it stays within the heatmap borders.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    # masked_heatmap is a VIEW into heatmap; np.maximum(..., out=view)
    # therefore writes straight into heatmap.
    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:  # TODO debug
        np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
    return heatmap
| 147 |
+
|
| 148 |
+
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
    """Write dense regression targets around *center* into *regmap* in
    place; each pixel keeps the target of whichever object's Gaussian is
    strongest (compared against *heatmap*). Returns the mutated regmap.

    regmap: (dim, H, W) regression map, updated in place.
    heatmap: (H, W) strength map used to arbitrate overlapping objects.
    value: per-object target, broadcast across the stamp.
    is_offset: if True and dim == 2, store per-pixel offsets to the
        center instead of the constant value.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
    dim = value.shape[0]
    # NOTE(review): reg is allocated (diameter*2+1) wide while only the
    # radius-window is ever read below — the extra area is unused;
    # presumably inherited from upstream. Confirm before "fixing".
    reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value
    if is_offset and dim == 2:
        # Convert the constant center value into per-pixel offsets.
        delta = np.arange(diameter * 2 + 1) - radius
        reg[0] = reg[0] - delta.reshape(1, -1)
        reg[1] = reg[1] - delta.reshape(-1, 1)

    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]

    # Clip the stamp to the map borders.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom,
                               radius - left:radius + right]
    masked_reg = reg[:, radius - top:radius + bottom,
                     radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:  # TODO debug
        # Pixels where this object's Gaussian dominates take its targets.
        idx = (masked_gaussian >= masked_heatmap).reshape(
            1, masked_gaussian.shape[0], masked_gaussian.shape[1])
        masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg
    regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
    return regmap
| 179 |
+
|
| 180 |
+
def draw_msra_gaussian(heatmap, center, sigma):
    """Splat an MSRA-style Gaussian (truncated at 3*sigma) onto *heatmap*
    in place at *center* (x, y), via element-wise maximum. Returns the
    mutated heatmap.

    NOTE(review): `w, h` are assigned from shape[0]/shape[1], i.e. swapped
    relative to numpy's (rows, cols) convention; the bound checks below
    are only self-consistent for square heatmaps — confirm before using
    with non-square maps.
    """
    tmp_size = sigma * 3
    mu_x = int(center[0] + 0.5)
    mu_y = int(center[1] + 0.5)
    w, h = heatmap.shape[0], heatmap.shape[1]
    # Upper-left / bottom-right corners of the Gaussian stamp.
    ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
    br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
    if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
        # Stamp lies entirely outside the map; nothing to draw.
        return heatmap
    size = 2 * tmp_size + 1
    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # Overlap ranges in Gaussian (g_*) and heatmap (img_*) coordinates.
    g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
    img_x = max(0, ul[0]), min(br[0], h)
    img_y = max(0, ul[1]), min(br[1], w)
    heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
        heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
        g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
    return heatmap
| 203 |
+
|
| 204 |
+
def grayscale(image):
    """Convert a BGR image (OpenCV channel order) to single-channel gray."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
| 207 |
+
|
| 208 |
+
def lighting_(data_rng, image, alphastd, eigval, eigvec):
    """Add PCA-based color noise to *image* in place (AlexNet-style
    lighting augmentation). data_rng is a numpy RandomState."""
    alpha = data_rng.normal(scale=alphastd, size=(3,))
    noise = np.dot(eigvec, eigval * alpha)
    image += noise
| 212 |
+
|
| 213 |
+
def blend_(alpha, image1, image2):
    """In-place blend: image1 <- alpha*image1 + (1-alpha)*image2.

    NOTE: image2 is also scaled in place (by 1-alpha); callers in this
    file pass views of the grayscale buffer and rely on mutation.
    """
    image2 *= (1 - alpha)
    image1 *= alpha
    image1 += image2
| 218 |
+
|
| 219 |
+
def saturation_(data_rng, image, gs, gs_mean, var):
    """Randomly jitter saturation in place by blending *image* toward its
    grayscale version *gs*; jitter strength is uniform in [-var, var]."""
    factor = 1.0 + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs[:, :, None])
| 223 |
+
|
| 224 |
+
def brightness_(data_rng, image, gs, gs_mean, var):
    """Randomly scale brightness of *image* in place; gs/gs_mean are
    unused, kept so all color-jitter functions share one signature."""
    image *= 1.0 + data_rng.uniform(low=-var, high=var)
| 228 |
+
|
| 229 |
+
def contrast_(data_rng, image, gs, gs_mean, var):
    """Randomly jitter contrast in place by blending *image* toward the
    scalar grayscale mean *gs_mean*."""
    factor = 1.0 + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
| 233 |
+
|
| 234 |
+
def color_aug(data_rng, image, eig_val, eig_vec):
    """Apply brightness/contrast/saturation jitter in random order, then
    PCA lighting noise — all in place on *image*.

    data_rng: numpy RandomState used for all random draws.
    eig_val, eig_vec: PCA eigenvalues/eigenvectors for lighting_().
    """
    functions = [brightness_, contrast_, saturation_]
    random.shuffle(functions)  # randomize application order each call

    # Grayscale buffer shared by the jitter functions; blend_ mutates it.
    gs = grayscale(image)
    gs_mean = gs.mean()
    for f in functions:
        f(data_rng, image, gs, gs_mean, 0.4)  # jitter strength 0.4
    lighting_(data_rng, image, 0.1, eig_val, eig_vec)
|
pix2text/doc_xl_layout/utils/post_process.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import print_function
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from .ddd_utils import ddd2locrot
|
| 8 |
+
from .image import transform_preds
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_pred_depth(depth):
    """Identity hook: predicted depth is used as-is (placeholder kept so
    the decode pipeline has a uniform per-field transform)."""
    return depth
| 14 |
+
|
| 15 |
+
def get_alpha(rot):
    """Decode the observation angle alpha from an (N, 8) two-bin rotation
    head: [bin1_cls0, bin1_cls1, bin1_sin, bin1_cos,
           bin2_cls0, bin2_cls1, bin2_sin, bin2_cos].
    Picks per row whichever bin has the higher confidence.
    """
    use_bin1 = rot[:, 1] > rot[:, 5]
    alpha_bin1 = np.arctan(rot[:, 2] / rot[:, 3]) - 0.5 * np.pi
    alpha_bin2 = np.arctan(rot[:, 6] / rot[:, 7]) + 0.5 * np.pi
    # Arithmetic select (not np.where) to keep NaN propagation identical
    # to the original formulation.
    return alpha_bin1 * use_bin1 + alpha_bin2 * (1 - use_bin1)
| 24 |
+
|
| 25 |
+
def ddd_post_process_2d(dets, c, s, opt):
    """Decode raw 3-D detection output into per-image, 1-based-class dicts.

    dets: batch x max_dets x dim network output; columns (per code below)
        are [x, y, score, 8-dim rotation bins, depth, 3-dim size,
        (optional wh), ..., class].
    c, s: per-image crop center / scale used to undo the input transform.
    opt: config object providing output_w, output_h, num_classes.
    Returns a list (one dict per image) mapping class id (1-based) to a
    float32 array of [x, y, score, alpha, depth, dims..., (wh...)] rows.
    """
    ret = []
    # wh columns exist only when the head emits more than 16 channels.
    include_wh = dets.shape[2] > 16
    for i in range(dets.shape[0]):
        top_preds = {}
        # Map center points back to original image coordinates (in place).
        dets[i, :, :2] = transform_preds(
            dets[i, :, 0:2], c[i], s[i], (opt.output_w, opt.output_h))
        classes = dets[i, :, -1]
        for j in range(opt.num_classes):
            inds = (classes == j)
            top_preds[j + 1] = np.concatenate([
                dets[i, inds, :3].astype(np.float32),
                get_alpha(dets[i, inds, 3:11])[:, np.newaxis].astype(np.float32),
                get_pred_depth(dets[i, inds, 11:12]).astype(np.float32),
                dets[i, inds, 12:15].astype(np.float32)], axis=1)
            if include_wh:
                # Box sizes also need mapping back to image scale.
                top_preds[j + 1] = np.concatenate([
                    top_preds[j + 1],
                    transform_preds(
                        dets[i, inds, 15:17], c[i], s[i], (opt.output_w, opt.output_h))
                    .astype(np.float32)], axis=1)
        ret.append(top_preds)
    return ret
| 51 |
+
|
| 52 |
+
def ddd_post_process_3d(dets, calibs):
    """Lift decoded 2-D detections to 3-D boxes via camera calibration.

    dets: output of ddd_post_process_2d (per-image dicts of class -> rows
        [x, y, score, alpha, depth, dims(3), wh(2)]).
    calibs: calibration matrices; only calibs[0] is used — presumably all
        images in the batch share one calibration (NOTE: confirm).
    Returns per-image dicts of class -> float32 array of
    [alpha, bbox(4), dims(3), locations(3), rotation_y, score].
    """
    ret = []
    for i in range(len(dets)):
        preds = {}
        for cls_ind in dets[i].keys():
            preds[cls_ind] = []
            for j in range(len(dets[i][cls_ind])):
                # Unpack one decoded detection row.
                center = dets[i][cls_ind][j][:2]
                score = dets[i][cls_ind][j][2]
                alpha = dets[i][cls_ind][j][3]
                depth = dets[i][cls_ind][j][4]
                dimensions = dets[i][cls_ind][j][5:8]
                wh = dets[i][cls_ind][j][8:10]
                # Recover 3-D location and global yaw from the image-space
                # center, observation angle, depth and calibration.
                locations, rotation_y = ddd2locrot(
                    center, alpha, dimensions, depth, calibs[0])
                bbox = [center[0] - wh[0] / 2, center[1] - wh[1] / 2,
                        center[0] + wh[0] / 2, center[1] + wh[1] / 2]
                pred = [alpha] + bbox + dimensions.tolist() + \
                       locations.tolist() + [rotation_y, score]
                preds[cls_ind].append(pred)
            preds[cls_ind] = np.array(preds[cls_ind], dtype=np.float32)
        ret.append(preds)
    return ret
| 78 |
+
|
| 79 |
+
def ddd_post_process(dets, c, s, calibs, opt):
    """Full 3-D detection post-processing: decode to image space, then
    lift to 3-D with calibration.

    dets: batch x max_dets x dim raw output.
    Returns a 1-based-class detection list per image.
    """
    decoded_2d = ddd_post_process_2d(dets, c, s, opt)
    return ddd_post_process_3d(decoded_2d, calibs)
| 86 |
+
|
| 87 |
+
def ctdet_4ps_post_process(dets, c, s, h, w, num_classes):
    """Map 4-point (quadrilateral) box detections back to original image
    coordinates and group them by 1-based class id.

    dets: batch x max_dets x dim; columns 0..7 are four (x, y) corners,
    column 8 the score, column 9 the class id.
    Returns a list of {class_id: [row, ...]} dicts, one per image.
    """
    results = []
    for b in range(dets.shape[0]):
        # Undo the network-input affine transform for each corner pair.
        for start in (0, 2, 4, 6):
            dets[b, :, start:start + 2] = transform_preds(
                dets[b, :, start:start + 2], c[b], s[b], (w, h))
        class_ids = dets[b, :, 9]
        per_class = {}
        for cls in range(num_classes):
            mask = (class_ids == cls)
            per_class[cls + 1] = np.concatenate(
                [dets[b, mask, :8].astype(np.float32),
                 dets[b, mask, 8:].astype(np.float32)], axis=1).tolist()
        results.append(per_class)
    return results
| 106 |
+
|
| 107 |
+
def ctdet_post_process(dets, c, s, h, w, num_classes):
    """Map axis-aligned box detections back to original image coordinates
    and group them by 1-based class id.

    dets: batch x max_dets x dim; columns 0..3 are the box corners,
    column 4 the score, last column the class id.
    Returns a list of {class_id: [row, ...]} dicts, one per image.
    """
    results = []
    for b in range(dets.shape[0]):
        # Undo the network-input affine transform for both corners.
        dets[b, :, :2] = transform_preds(
            dets[b, :, 0:2], c[b], s[b], (w, h))
        dets[b, :, 2:4] = transform_preds(
            dets[b, :, 2:4], c[b], s[b], (w, h))
        class_ids = dets[b, :, -1]
        per_class = {}
        for cls in range(num_classes):
            mask = (class_ids == cls)
            per_class[cls + 1] = np.concatenate(
                [dets[b, mask, :4].astype(np.float32),
                 dets[b, mask, 4:5].astype(np.float32)], axis=1).tolist()
        results.append(per_class)
    return results
| 126 |
+
|
| 127 |
+
def ctdet_corner_post_process(corner, c, s, h, w, num_classes):
    """Map corner-point predictions back to original image coordinates
    (mutates *corner* in place and returns it). num_classes is unused,
    kept for a uniform post-process signature; only the first image's
    c/s are applied.
    """
    corner[:, :2] = transform_preds(corner[:, 0:2], c[0], s[0], (w, h))
    return corner
| 131 |
+
|
| 132 |
+
def multi_pose_post_process(dets, c, s, h, w):
    """Map multi-person pose detections back to image coordinates.

    dets: batch x max_dets x 40 — [bbox(4), score, 17 keypoints x 2, ...].
    Returns one dict per image; the sole key is np.int32(1) (the single
    "person" class) and each row is [bbox(4), score, 34 keypoint coords].
    """
    ret = []
    for i in range(dets.shape[0]):
        # Both bbox corners and keypoints are (x, y) pairs; reshape to
        # (-1, 2) so one transform_preds call handles them all.
        bbox = transform_preds(dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h))
        pts = transform_preds(dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h))
        top_preds = np.concatenate(
            [bbox.reshape(-1, 4), dets[i, :, 4:5],
             pts.reshape(-1, 34)], axis=1).astype(np.float32).tolist()
        # Key is an np.int32 scalar 1 (class ids are 1-based).
        ret.append({np.ones(1, dtype=np.int32)[0]: top_preds})
    return ret
|