diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..1cf1df9110199449371df3536cb7ae773a75661a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,6 @@ *.7z filter=lfs diff=lfs merge=lfs -text +*.ipynb filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text *.arrow filter=lfs diff=lfs merge=lfs -text *.bin filter=lfs diff=lfs merge=lfs -text *.bz2 filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bd8cd18eb507aaaea59caa98a03af1bb0584a482 --- /dev/null +++ b/.gitignore @@ -0,0 +1,143 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +.DS_Store +data/ +models/ +output-*/ +outputs-*/ +outputs/ +*.jpg +*.jpeg +*.png +docs/feedbacks/ +*.tar +*.pth +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +.idea/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a11adbafa61de469117ae782cc5168bde7b89667 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,19 @@ +# Read the Docs configuration file for MkDocs projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.9" + +mkdocs: + configuration: mkdocs.yml + +# Optionally declare the Python requirements required to build your docs +python: + install: + - requirements: docs/requirements.txt diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..334b993f68bd352ca77efe9bf7e65e14cce19711 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 BreezeDeus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..3091378b9f6f51b4bf61fbdc7152571a3e720e7a
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,44 @@
+predict:
+	p2t predict -l en,ch_sim -a mfd -t yolov7_tiny -i docs/examples/mixed.jpg --save-analysis-res tmp-output.jpg
+#	p2t predict -l en,ch_sim --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' \
+#		--use-analyzer -a mfd -t yolov7 --resized-shape 768 \
+#		--analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt \
+#		--latex-ocr-model-fp ~/.pix2text/formula/p2t-mfr-20230702.pth \
+#		-i docs/examples/mixed.jpg --save-analysis-res tmp-output.jpg
+#	p2t predict -l vi \
+#		--use-analyzer -a mfd -t yolov7 --resized-shape 768 \
+#		--analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt \
+#		--latex-ocr-model-fp ~/.pix2text/formula/p2t-mfr-20230702.pth \
+#		-i docs/examples/vietnamese.jpg --save-analysis-res tmp-output.jpg
+#	p2t predict -l en,ch_tra \
+#		--use-analyzer -a mfd -t yolov7 --resized-shape 768 \
+#		--analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt \
+#		--latex-ocr-model-fp ~/.pix2text/formula/p2t-mfr-20230702.pth --rec-kwargs '{"det_bbox_max_expand_ratio": 0}'\
+#		-i docs/examples/ch_tra7.jpg --save-analysis-res tmp-output.jpg
+
+evaluate-mfr:
+	p2t evaluate -l en,ch_sim --mfd-config '{"model_name": "mfd"}' \
+		--formula-ocr-config '{"model_name":"mfr-1.5","model_backend":"onnx"}' \
+		--text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' \
+		--resized-shape 768 --auto-line-break --file-type formula \
+		--max-samples 50 --prefix-img-dir data \
+		-i data/exported_call_events_with_images.json -o data/exported_cer_mfr1.0.json \
+		--output-excel data/exported_cer_mfr1.0.xls --output-html data/exported_cer_mfr1.0.html
+
+
+package:
+	rm -rf build
+	python setup.py sdist bdist_wheel
+
+VERSION := $(shell sed -n "s/^__version__ = '\(.*\)'/\1/p" pix2text/__version__.py)
+upload:
+	python -m twine upload dist/pix2text-$(VERSION)* --verbose
+
+# Start the OCR HTTP service
+serve:
+	p2t serve -l en,ch_sim -a mfd -t yolov7 --analyzer-model-fp ~/.cnstd/1.2/analysis/mfd-yolov7-epoch224-20230613.pt --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}'
+
+docker-build:
+	docker build -t breezedeus/pix2text:v$(VERSION) .
+
+.PHONY: predict evaluate-mfr package upload serve docker-build
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..20086e72ac0dc9641cc87f6063891d6c4d299b67
--- /dev/null
+++ b/README.md
@@ -0,0 +1,284 @@
+
+ +
 
+ +[![Discord](https://img.shields.io/discord/1200765964434821260?label=Discord)](https://discord.gg/GgD87WM8Tf) +[![Downloads](https://static.pepy.tech/personalized-badge/pix2text?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads)](https://pepy.tech/project/pix2text) +[![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fgithub.com%2Fbreezedeus%2FPix2Text&label=Visitors&countColor=%23ff8a65&style=flat&labelStyle=none)](https://visitorbadge.io/status?path=https%3A%2F%2Fgithub.com%2Fbreezedeus%2FPix2Text) +[![license](https://img.shields.io/github/license/breezedeus/pix2text)](./LICENSE) +[![PyPI version](https://badge.fury.io/py/pix2text.svg)](https://badge.fury.io/py/pix2text) +[![forks](https://img.shields.io/github/forks/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +[![stars](https://img.shields.io/github/stars/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +![last-release](https://img.shields.io/github/release-date/breezedeus/pix2text) +![last-commit](https://img.shields.io/github/last-commit/breezedeus/pix2text) +[![Twitter](https://img.shields.io/twitter/url?url=https%3A%2F%2Ftwitter.com%2Fbreezedeus)](https://twitter.com/breezedeus) + +[📖 Doc](https://pix2text.readthedocs.io) | +[👩🏻‍💻 Online Service](https://p2t.breezedeus.com) | +[👨🏻‍💻 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo) | +[💬 Contact](https://www.breezedeus.com/article/join-group) + +
+ +
+ +[中文](./README_cn.md) | English + +
+
+
+
+# Pix2Text
+
+## Update 2025.07.25: **V1.1.4** Released
+
+Major Changes:
+
+- Upgraded the Mathematical Formula Detection (MFD) and Mathematical Formula Recognition (MFR) models to version 1.5. All default configurations, documentation, and examples now use `mfd-1.5` and `mfr-1.5` as the standard models.
+
+## Update 2025.04.15: **V1.1.3** Released
+
+Major Changes:
+
+- Support for `VlmTableOCR` and `VlmTextFormulaOCR` models based on the VLM interface (see [LiteLLM documentation](https://docs.litellm.ai/docs/)), allowing the use of closed-source VLM models. Installation command: `pip install pix2text[vlm]`.
+  - Usage examples can be found in [tests/test_vlm.py](tests/test_vlm.py) and [tests/test_pix2text.py](tests/test_pix2text.py).
+
+## Update 2024.11.17: **V1.1.2** Released
+
+Major Changes:
+
+* A new layout analysis model, [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO), has been integrated, improving the accuracy of layout analysis.
+
+## Update 2024.06.18: **V1.1.1** Released
+
+Major Changes:
+
+* Support for the new Mathematical Formula Detection (MFD) models: [breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd)), which significantly improves the accuracy of formula detection.
+
+See details: [Pix2Text V1.1.1 Released, Bringing Better Mathematical Formula Detection Models | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1).
+
+## Update 2024.04.28: **V1.1** Released
+
+Major Changes:
+
+* Added layout analysis and table recognition models, supporting the conversion of images with complex layouts into Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples_en/).
+* Added support for converting entire PDF files to Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples_en/).
+* Enhanced the interface with more features, including adjustments to existing interface parameters.
+* Launched the [Pix2Text Online Documentation](https://pix2text.readthedocs.io).
+
+## Update 2024.02.26: **V1.0** Released
+
+Major Changes:
+
+* The Mathematical Formula Recognition (MFR) model employs a new architecture and has been trained on a new dataset, achieving state-of-the-art (SOTA) accuracy. For detailed information, please see: [Pix2Text V1.0 New Release: The Best Open-Source Formula Recognition Model | Breezedeus.com](https://www.breezedeus.com/article/p2t-v1.0).
+
+See more at: [RELEASE.md](docs/RELEASE.md).
+
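+> The `mfd-1.5` and `mfr-1.5` models mentioned above are the defaults since V1.1.4, but they can also be pinned explicitly from Python. Below is a rough sketch: the nested config keys mirror the CLI's `--mfd-config` and `--formula-ocr-config` options used throughout these docs, and the exact key names are an assumption to verify against the [Usage documentation](https://pix2text.readthedocs.io/zh-cn/stable/usage/).
+
+```python
+from pix2text import Pix2Text
+
+# Assumed key layout, mirroring the CLI flags:
+#   'mfd'     -> --mfd-config
+#   'formula' -> --formula-ocr-config
+text_formula_config = {
+    'mfd': {'model_name': 'mfd-1.5', 'model_backend': 'onnx'},
+    'formula': {'model_name': 'mfr-1.5', 'model_backend': 'onnx'},
+}
+p2t = Pix2Text.from_config(total_configs={'text_formula': text_formula_config})
+print(p2t.recognize_formula('docs/examples/math-formula-42.png'))
+```
+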
+ +**Pix2Text (P2T)** aims to be a **free and open-source Python** alternative to **[Mathpix](https://mathpix.com/)**, and it can already accomplish **Mathpix**'s core functionality. **Pix2Text (P2T) can recognize layouts, tables, images, text, mathematical formulas, and integrate all of these contents into Markdown format. P2T can also convert an entire PDF file (which can contain scanned images or any other format) into Markdown format.** + +**Pix2Text (P2T)** integrates the following models: + +- **Layout Analysis Model**: [breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-layout)). +- **Table Recognition Model**: [breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-table-rec)). +- **Text Recognition Engine**: Supports **80+ languages** such as **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. For English and Simplified Chinese recognition, it uses the open-source OCR tool [CnOCR](https://github.com/breezedeus/cnocr), while for other languages, it uses the open-source OCR tool [EasyOCR](https://github.com/JaidedAI/EasyOCR). +- **Mathematical Formula Detection Model (MFD)**: [breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5)). Implemented based on [CnSTD](https://github.com/breezedeus/cnstd). +- **Mathematical Formula Recognition Model (MFR)**: [breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5)). + +Several models are contributed by other open-source authors, and their contributions are highly appreciated. + +
+ Pix2Text Arch Flow +
+ +For detailed explanations, please refer to the [Pix2Text Online Documentation/Models](https://pix2text.readthedocs.io/zh-cn/stable/models/). + +
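+If you want to see the whole pipeline run end to end before digging into the documentation, the minimal sketch below strings the models above together. It only uses calls that appear in the examples documentation, and `docs/examples/page2.png` ships with this repository:
+
+```python
+from pix2text import Pix2Text
+
+# Layout analysis -> formula detection -> formula/text recognition,
+# all merged into Markdown by a single call.
+p2t = Pix2Text.from_config()  # default free models, downloaded on first use
+page = p2t.recognize_page('docs/examples/page2.png')
+page.to_markdown('output-page')  # writes the result into the output-page directory
+```
+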
+
+As a Python 3 toolkit, P2T may not be very user-friendly for those unfamiliar with Python. Therefore, we also provide a **free-to-use [P2T Online Service](https://p2t.breezedeus.com)**, where you can directly upload images and get P2T's parsing results. The online service uses the latest models, so it performs better than the open-source ones.
+
+If you're interested, feel free to add the assistant as a friend by scanning the QR code and mentioning `p2t`. The assistant will regularly invite everyone into the group, where the latest developments related to the P2T tools are announced:
+
+ Wechat-QRCode +
+
+The author also maintains a **Knowledge Planet** [**P2T/CnOCR/CnSTD Private Group**](https://t.zsxq.com/FEYZRJQ), where questions are answered promptly. You're welcome to join. The private group will also gradually release some private materials related to P2T/CnOCR/CnSTD, including **some unreleased models**, **discounts on purchasing premium models**, **code snippets for different application scenarios**, and answers to difficult problems encountered during use. The planet also publishes the latest research materials related to P2T/OCR/STD.
+
+For more contact methods, please refer to [Contact](https://pix2text.readthedocs.io/zh-cn/stable/contact/).
+
+
+## List of Supported Languages
+
+The text recognition engine of Pix2Text supports **`80+` languages**, including **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. Among these, **English** and **Simplified Chinese** recognition utilize the open-source OCR tool **[CnOCR](https://github.com/breezedeus/cnocr)**, while recognition for other languages employs the open-source OCR tool **[EasyOCR](https://github.com/JaidedAI/EasyOCR)**. Special thanks to the respective authors.
+
+The list of **supported languages** and **language codes** is shown below:
+
+↓↓↓ Click to show details ↓↓↓ + +| Language | Code Name | +| ------------------- | ----------- | +| Abaza | abq | +| Adyghe | ady | +| Afrikaans | af | +| Angika | ang | +| Arabic | ar | +| Assamese | as | +| Avar | ava | +| Azerbaijani | az | +| Belarusian | be | +| Bulgarian | bg | +| Bihari | bh | +| Bhojpuri | bho | +| Bengali | bn | +| Bosnian | bs | +| Simplified Chinese | ch_sim | +| Traditional Chinese | ch_tra | +| Chechen | che | +| Czech | cs | +| Welsh | cy | +| Danish | da | +| Dargwa | dar | +| German | de | +| English | en | +| Spanish | es | +| Estonian | et | +| Persian (Farsi) | fa | +| French | fr | +| Irish | ga | +| Goan Konkani | gom | +| Hindi | hi | +| Croatian | hr | +| Hungarian | hu | +| Indonesian | id | +| Ingush | inh | +| Icelandic | is | +| Italian | it | +| Japanese | ja | +| Kabardian | kbd | +| Kannada | kn | +| Korean | ko | +| Kurdish | ku | +| Latin | la | +| Lak | lbe | +| Lezghian | lez | +| Lithuanian | lt | +| Latvian | lv | +| Magahi | mah | +| Maithili | mai | +| Maori | mi | +| Mongolian | mn | +| Marathi | mr | +| Malay | ms | +| Maltese | mt | +| Nepali | ne | +| Newari | new | +| Dutch | nl | +| Norwegian | no | +| Occitan | oc | +| Pali | pi | +| Polish | pl | +| Portuguese | pt | +| Romanian | ro | +| Russian | ru | +| Serbian (cyrillic) | rs_cyrillic | +| Serbian (latin) | rs_latin | +| Nagpuri | sck | +| Slovak | sk | +| Slovenian | sl | +| Albanian | sq | +| Swedish | sv | +| Swahili | sw | +| Tamil | ta | +| Tabassaran | tab | +| Telugu | te | +| Thai | th | +| Tajik | tjk | +| Tagalog | tl | +| Turkish | tr | +| Uyghur | ug | +| Ukranian | uk | +| Urdu | ur | +| Uzbek | uz | +| Vietnamese | vi | + + +> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) . + +
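+The code names above are the values accepted by the `-l, --languages` option of the `p2t` command; in the Python API they can be passed in via the configuration dictionary. A rough sketch follows (the `total_configs`/`languages` key names follow the usage documentation and should be treated as assumptions to verify against your installed version):
+
+```python
+from pix2text import Pix2Text
+
+# Vietnamese + English. Languages other than English and Simplified Chinese
+# go through EasyOCR and require: pip install pix2text[multilingual]
+total_config = {'text_formula': {'languages': ('en', 'vi')}}
+p2t = Pix2Text.from_config(total_configs=total_config)
+print(p2t.recognize_text_formula('docs/examples/vietnamese.jpg', return_text=True))
+```
+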
+ + + +## Online Service + +Everyone can use the **[P2T Online Service](https://p2t.breezedeus.com)** for free, with a daily limit of 10,000 characters per account, which should be sufficient for normal use. *Please refrain from bulk API calls, as machine resources are limited, and this could prevent others from accessing the service.* + +Due to hardware constraints, the Online Service currently only supports **Simplified Chinese** and **English** languages. To try the models in other languages, please use the following **Online Demo**. + +## Online Demo 🤗 + +You can also try the **[Online Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)** to see the performance of **P2T** in various languages. However, the online demo operates on lower hardware specifications and may be slower. For Simplified Chinese or English images, it is recommended to use the **[P2T Online Service](https://p2t.breezedeus.com)**. + +## Examples + +See: [Pix2Text Online Documentation/Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples_en/). + +## Usage + +See: [Pix2Text Online Documentation/Usage](https://pix2text.readthedocs.io/zh-cn/stable/usage/). + +## Models + +See: [Pix2Text Online Documentation/Models](https://pix2text.readthedocs.io/zh-cn/stable/models/). + +## Install + +Well, one line of command is enough if it goes well. + +```bash +pip install pix2text +``` + +If you need to recognize languages other than **English** and **Simplified Chinese**, please use the following command to install additional packages: + +```bash +pip install pix2text[multilingual] +``` + +If the installation is slow, you can specify an installation source, such as using the Aliyun source: + +```bash +pip install pix2text -i https://mirrors.aliyun.com/pypi/simple +``` + +For more information, please refer to: [Pix2Text Online Documentation/Install](https://pix2text.readthedocs.io/zh-cn/stable/install/). + +## Command Line Tool + +See: [Pix2Text Online Documentation/Command Tool](https://pix2text.readthedocs.io/zh-cn/stable/command/). + +## HTTP Service + +See: [Pix2Text Online Documentation/Command Tool/Start Service](https://pix2text.readthedocs.io/zh-cn/stable/command/). + + +## MacOS Desktop Application + +Please refer to [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) for installing the Pix2Text Desktop App for MacOS. + +
+ Pix2Text Mac App +
+ + +## A cup of coffee for the author + +It is not easy to maintain and evolve the project, so if it is helpful to you, please consider [offering the author a cup of coffee 🥤](https://www.breezedeus.com/article/buy-me-coffee). + +--- + +Official code base: [https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text). Please cite it properly. + +For more information on Pix2Text (P2T), visit: [https://www.breezedeus.com/article/pix2text](https://www.breezedeus.com/article/pix2text). diff --git a/README_cn.md b/README_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..da651d45a3c4d60f4179a7408d7e85cb7011142a --- /dev/null +++ b/README_cn.md @@ -0,0 +1,288 @@ +
+ +
 
+ +[![Discord](https://img.shields.io/discord/1200765964434821260?label=Discord)](https://discord.gg/GgD87WM8Tf) +[![Downloads](https://static.pepy.tech/personalized-badge/pix2text?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads)](https://pepy.tech/project/pix2text) +[![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fgithub.com%2Fbreezedeus%2FPix2Text&label=Visitors&countColor=%23ff8a65&style=flat&labelStyle=none)](https://visitorbadge.io/status?path=https%3A%2F%2Fgithub.com%2Fbreezedeus%2FPix2Text) +[![license](https://img.shields.io/github/license/breezedeus/pix2text)](./LICENSE) +[![PyPI version](https://badge.fury.io/py/pix2text.svg)](https://badge.fury.io/py/pix2text) +[![forks](https://img.shields.io/github/forks/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +[![stars](https://img.shields.io/github/stars/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +![last-release](https://img.shields.io/github/release-date/breezedeus/pix2text) +![last-commit](https://img.shields.io/github/last-commit/breezedeus/pix2text) +[![Twitter](https://img.shields.io/twitter/url?url=https%3A%2F%2Ftwitter.com%2Fbreezedeus)](https://twitter.com/breezedeus) + +[📖 在线文档](https://pix2text.readthedocs.io) | +[👩🏻‍💻 网页版](https://p2t.breezedeus.com) | +[👨🏻‍💻 在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo) | +[💬 交流群](https://www.breezedeus.com/article/join-group) + +
+ +
+ +[English](./README.md) | 中文 + + +
+
+# Pix2Text (P2T)
+
+## Update 2025.07.25:发布 **V1.1.4**
+
+主要变更:
+
+- 数学公式检测(MFD)和数学公式识别(MFR)模型升级到 1.5 版本,所有默认配置、文档和示例均以 `mfd-1.5` 和 `mfr-1.5` 为标准模型。
+
+## Update 2025.04.15:发布 **V1.1.3**
+
+主要变更:
+
+- 支持基于 VLM 接口(具体参考 [LiteLLM 文档](https://docs.litellm.ai/docs/))的 `VlmTableOCR` 和 `VlmTextFormulaOCR` 模型,可使用闭源 VLM 模型。安装命令:`pip install pix2text[vlm]`。
+  - 使用方式见 [tests/test_vlm.py](tests/test_vlm.py) 和 [tests/test_pix2text.py](tests/test_pix2text.py)。
+
+## Update 2024.11.17:发布 **V1.1.2**
+
+主要变更:
+
+* 版面分析模型加入 [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO),提升版面分析的准确性。
+
+## Update 2024.06.18:发布 **V1.1.1**
+
+主要变更:
+
+* 支持新的数学公式检测模型(MFD):[breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd)),公式检测精度获得较大提升。
+
+具体说明请见:[Pix2Text V1.1.1 发布,带来更好的数学公式检测模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1)。
+
+## Update 2024.04.28:发布 **V1.1**
+
+主要变更:
+
+* 加入了版面分析和表格识别模型,支持把复杂排版的图片转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples/)。
+* 支持把整个 PDF 文件转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/stable/examples/)。
+* 加入了更丰富的接口,已有接口的参数也有所调整。
+* 上线了 [Pix2Text 在线文档](https://pix2text.readthedocs.io)。
+
+## Update 2024.02.26:发布 **V1.0**
+
+主要变更:
+
+* 数学公式识别(MFR)模型使用新架构,在新的数据集上训练,获得了 SOTA 的精度。具体说明请见:[Pix2Text V1.0 新版发布:最好的开源公式识别模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-v1.0)。
+
+了解更多:[RELEASE.md](docs/RELEASE.md)。
+
+ +**Pix2Text (P2T)** 期望成为 **[Mathpix](https://mathpix.com/)** 的**免费开源 Python** 替代工具,目前已经可以完成 **Mathpix** 的核心功能。 +**Pix2Text (P2T) 可以识别图片中的版面、表格、图片、文字、数学公式等内容,并整合所有内容后以 Markdown 格式输出。P2T 也可以把一整个 PDF 文件(PDF 的内容可以是扫描图片或者其他任何格式)转换为 Markdown 格式。** + +**Pix2Text (P2T)** 整合了以下模型: + +- **版面分析模型**:[breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout))。 +- **表格识别模型**:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。 +- **文字识别引擎**:支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。 +- **数学公式检测模型(MFD)**:[breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5))。基于 [CnSTD](https://github.com/breezedeus/cnstd) 实现。 +- **数学公式识别模型(MFR)**:[breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5))。 + +其中多个模型来自其他开源作者, 非常感谢他们的贡献。 + +
+ Pix2Text Arch Flow +
+ +具体说明请参考:[Pix2Text在线文档/模型](https://pix2text.readthedocs.io/zh-cn/stable/models/)。 + +
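+如果想先把整个流程完整跑一遍再查阅文档,可以参考下面的最小示例(只用到示例文档中出现过的接口;`docs/examples/page2.png` 随仓库提供):
+
+```python
+from pix2text import Pix2Text
+
+# 版面分析 -> 公式检测 -> 公式/文字识别,一次调用合并输出 Markdown
+p2t = Pix2Text.from_config()  # 默认使用免费开源模型,首次调用时自动下载
+page = p2t.recognize_page('docs/examples/page2.png')
+page.to_markdown('output-page')  # 结果保存在 output-page 目录中
+```
+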
+ +P2T 作为Python3工具包,对于不熟悉Python的朋友不太友好,所以我们也发布了**可免费使用**的 **[P2T网页版](https://p2t.breezedeus.com)**,直接把图片丢进网页就能输出P2T的解析结果。**网页版会使用最新的模型,效果会比开源模型更好。** + +感兴趣的朋友欢迎扫码加小助手为好友,备注 `p2t`,小助手会定期统一邀请大家入群。群内会发布P2T相关工具的最新进展: + +
+ 微信群二维码 +
+ +作者也维护 **知识星球** [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) ,这里面的提问会较快得到作者的回复,欢迎加入。**知识星球私享群**也会陆续发布一些P2T/CnOCR/CnSTD相关的私有资料,包括**部分未公开的模型**,**购买付费模型享优惠**,**不同应用场景的调用代码**,使用过程中遇到的难题解答等。星球也会发布P2T/OCR/STD相关的最新研究资料。 + + + +## 支持的语言列表 + +Pix2Text 的文字识别引擎支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 **[CnOCR](https://github.com/breezedeus/cnocr)** ,其他语言的识别使用的是开源 OCR 工具 **[EasyOCR](https://github.com/JaidedAI/EasyOCR)** ,感谢相关的作者们。 + +支持的**语言列表**和**语言代码**如下: +
+↓↓↓ Click to show details ↓↓↓ + + +| Language | Code Name | +| ------------------- | ----------- | +| Abaza | abq | +| Adyghe | ady | +| Afrikaans | af | +| Angika | ang | +| Arabic | ar | +| Assamese | as | +| Avar | ava | +| Azerbaijani | az | +| Belarusian | be | +| Bulgarian | bg | +| Bihari | bh | +| Bhojpuri | bho | +| Bengali | bn | +| Bosnian | bs | +| Simplified Chinese | ch_sim | +| Traditional Chinese | ch_tra | +| Chechen | che | +| Czech | cs | +| Welsh | cy | +| Danish | da | +| Dargwa | dar | +| German | de | +| English | en | +| Spanish | es | +| Estonian | et | +| Persian (Farsi) | fa | +| French | fr | +| Irish | ga | +| Goan Konkani | gom | +| Hindi | hi | +| Croatian | hr | +| Hungarian | hu | +| Indonesian | id | +| Ingush | inh | +| Icelandic | is | +| Italian | it | +| Japanese | ja | +| Kabardian | kbd | +| Kannada | kn | +| Korean | ko | +| Kurdish | ku | +| Latin | la | +| Lak | lbe | +| Lezghian | lez | +| Lithuanian | lt | +| Latvian | lv | +| Magahi | mah | +| Maithili | mai | +| Maori | mi | +| Mongolian | mn | +| Marathi | mr | +| Malay | ms | +| Maltese | mt | +| Nepali | ne | +| Newari | new | +| Dutch | nl | +| Norwegian | no | +| Occitan | oc | +| Pali | pi | +| Polish | pl | +| Portuguese | pt | +| Romanian | ro | +| Russian | ru | +| Serbian (cyrillic) | rs_cyrillic | +| Serbian (latin) | rs_latin | +| Nagpuri | sck | +| Slovak | sk | +| Slovenian | sl | +| Albanian | sq | +| Swedish | sv | +| Swahili | sw | +| Tamil | ta | +| Tabassaran | tab | +| Telugu | te | +| Thai | th | +| Tajik | tjk | +| Tagalog | tl | +| Turkish | tr | +| Uyghur | ug | +| Ukranian | uk | +| Urdu | ur | +| Uzbek | uz | +| Vietnamese | vi | + + +> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) . + +
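+上表中的 Code Name 即 `p2t` 命令 `-l, --languages` 选项接受的取值;在 Python 接口中可通过配置字典传入。下面是一个粗略示意(`total_configs`/`languages` 等键名参考使用说明文档,请以实际安装的版本为准):
+
+```python
+from pix2text import Pix2Text
+
+# 越南语 + 英语;英文、简体中文之外的语言需先安装:pip install pix2text[multilingual]
+total_config = {'text_formula': {'languages': ('en', 'vi')}}
+p2t = Pix2Text.from_config(total_configs=total_config)
+print(p2t.recognize_text_formula('docs/examples/vietnamese.jpg', return_text=True))
+```
+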
+ + + +## P2T 网页版 + +所有人都可以免费使用 **[P2T网页版](https://p2t.breezedeus.com)**,每人每天可以免费识别 10000 个字符,正常使用应该够用了。*请不要批量调用接口,机器资源有限,批量调用会导致其他人无法使用服务。* + +受限于机器资源,网页版当前只支持**简体中文和英文**,要尝试其他语言上的效果,请使用以下的**在线 Demo**。 + + + +## 在线 Demo 🤗 + +也可以使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内镜像](https://hf.qhduan.com/spaces/breezedeus/Pix2Text-Demo)) 尝试 **P2T** 在不同语言上的效果。但在线 Demo 使用的硬件配置较低,速度会较慢。如果是简体中文或者英文图片,建议使用 **[P2T网页版](https://p2t.breezedeus.com)**。 + +## 示例 + +参见:[Pix2Text在线文档/示例](https://pix2text.readthedocs.io/zh-cn/stable/examples/)。 + +## 使用说明 + +参见:[Pix2Text在线文档/使用说明](https://pix2text.readthedocs.io/zh-cn/stable/usage/)。 + +## 模型下载 + +参见:[Pix2Text在线文档/模型](https://pix2text.readthedocs.io/zh-cn/stable/models/)。 + + + +## 安装 + +嗯,顺利的话一行命令即可。 + +```bash +pip install pix2text +``` + +如果需要识别**英文**与**简体中文**之外的文字,请使用以下命令安装额外的包: + +```bash +pip install pix2text[multilingual] +``` + +安装速度慢的话,可以指定国内的安装源,如使用阿里云的安装源: + +```bash +pip install pix2text -i https://mirrors.aliyun.com/pypi/simple +``` + +
+ +更多说明参见:[Pix2Text在线文档/安装](https://pix2text.readthedocs.io/zh-cn/stable/install/)。 + +## 命令行工具 + +参见:[Pix2Text在线文档/命令行工具](https://pix2text.readthedocs.io/zh-cn/stable/command/)。 + +## HTTP 服务 + +参见:[Pix2Text在线文档/命令行工具/开启服务](https://pix2text.readthedocs.io/zh-cn/stable/command/)。 + +## Mac 桌面客户端 + +请参考 [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) 安装 Pix2Text 的 MacOS 桌面客户端。 + +
+ Pix2Text Mac 客户端 +
+ + +## 给作者来杯咖啡 + +开源不易,如果此项目对您有帮助,可以考虑 [给作者加点油🥤,鼓鼓气💪🏻](https://www.breezedeus.com/article/buy-me-coffee) 。 + +--- + +官方代码库:[https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text) 。 + +Pix2Text (P2T) 更多信息:[https://www.breezedeus.com/article/pix2text_cn](https://www.breezedeus.com/article/pix2text_cn) 。 diff --git a/docs/RELEASE.md b/docs/RELEASE.md new file mode 100644 index 0000000000000000000000000000000000000000..4de3a709ee7745fab232c839406ab1c0e3fd98ed --- /dev/null +++ b/docs/RELEASE.md @@ -0,0 +1,372 @@ +# Release Notes + +# Update 2025.07.25: **V1.1.4** Released + +Major Changes: + +- Upgraded the Mathematical Formula Detection (MFD) and Mathematical Formula Recognition (MFR) models to version 1.5. All default configurations, documentation, and examples now use `mfd-1.5` and `mfr-1.5` as the standard models. + +主要变更: + +- 数学公式检测(MFD)和数学公式识别(MFR)模型升级到 1.5 版本,所有默认配置、文档和示例均以 `mfd-1.5` 和 `mfr-1.5` 为标准模型。 + +# Update 2025.05.06: **V1.1.3.2** Released + +Major Changes: + +- Fixed a potential error when processing transparent images, see [#171](https://github.com/breezedeus/Pix2Text/issues/171) for details. + +主要变更: + +- 修复了处理透明图片时可能出现的错误,具体见 [#171](https://github.com/breezedeus/Pix2Text/issues/171) 。 + +# Update 2025.04.27: **V1.1.3.1** Released + +Major Changes: + +- Bugfix: Fixed the issue of model import related to VLM. + +主要变更: + +- 修复了 VLM 相关的模型导入问题。 + +# Update 2025.04.15: **V1.1.3** Released + +Major Changes: + +- Support for `VlmTableOCR` and `VlmTextFormulaOCR` models based on the VLM interface (see [LiteLLM documentation](https://docs.litellm.ai/docs/)) allowing the use of closed-source VLM models. Installation command: `pip install pix2text[vlm]`. + - Usage examples can be found in [tests/test_vlm.py](tests/test_vlm.py) and [tests/test_pix2text.py](tests/test_pix2text.py). + +主要变更: + +- 支持基于 VLM 接口(具体参考 [LiteLLM 文档](https://docs.litellm.ai/docs/))的 `VlmTableOCR` 和 `VlmTextFormulaOCR` 模型,可使用闭源 VLM 模型。安装命令:`pip install pix2text[vlm]`。 + - 使用方式见 [tests/test_vlm.py](tests/test_vlm.py) 和 [tests/test_pix2text.py](tests/test_pix2text.py)。 + + +# Update 2024.12.17: **V1.1.2.3** Released + +Major Changes: + +- Bugfix: Fixed issues related to downloading models on Windows. + +主要变更: + +- 修复了在 Windows 环境下下载模型的问题。 + + +# Update 2024.12.11: **V1.1.2.2** Released + +Major Changes: + +- Bugfix: Resolved issues related to serialization errors when handling ONNX Runtime session options by ensuring that non-serializable configurations are managed appropriately. + +主要变更: + +- 修复了与 ONNX Runtime session options 相关的序列化错误,通过确保不可序列化的配置信息在适当的管理下进行处理。 + + +# Update 2024.12.02: **V1.1.2.1** Released + +Major Changes: + +* Fixed an error in `fetch_column_info()@DocYoloLayoutParser`, thanks to Bin. + +主要变更: + +* 修复了 fetch_column_info()@DocYoloLayoutParser 中的错误,感谢网友 Bin 。 + + +# Update 2024.11.17: **V1.1.2** Released + +Major Changes: + +* A new layout analysis model [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO) has been integrated, improving the accuracy of layout analysis. +* Bug fixes: + * When the text language is set to English only, a dedicated English OCR model is used to avoid including Chinese in the output. + * The processing logic for PNG images has been optimized, enhancing recognition performance. 
+ + +主要变更: + +* 版面分析模型加入 [DocLayout-YOLO](https://github.com/opendatalab/DocLayout-YOLO),提升版面分析的准确性。 +* 修复 bugs: + * 在设置文本语言只有英语时,使用专门的英文 OCR 模型,避免输出中包含中文。 + * 对 PNG 图片的处理逻辑进行了优化,提升了识别效果。 + + +# Update 2024.07.18: **V1.1.1.2** Released + +Major Changes: + +* fix bugs: + * https://github.com/breezedeus/Pix2Text/issues/129 + * https://github.com/breezedeus/Pix2Text/issues/116 + +主要变更: + +* 修复 bugs: + * https://github.com/breezedeus/Pix2Text/issues/129 + * https://github.com/breezedeus/Pix2Text/issues/116 + +# Update 2024.06.24: **V1.1.1.1** Released + +Major Changes: + +* Added a new parameter `static_resized_shape` when initializing `MathFormulaDetector`, which is used to resize the input image to a fixed size. Some formats of models require fixed-size input images during inference, such as `CoreML`. + +主要变更: + +* `MathFormulaDetector` 初始化时加入了参数 `static_resized_shape`, 用于把输入图片 resize 为固定大小。某些格式的模型在推理时需要固定大小的输入图片,如 `CoreML`。 + + +## Update 2024.06.18: **V1.1.1** Released + +Major changes: + +* Support the new mathematical formula detection models (MFD): [breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd)), which significantly improves the accuracy of formula detection. + +See details: [Pix2Text V1.1.1 Released, Bringing Better Mathematical Formula Detection Models | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1). + +主要变更: + +* 支持新的数学公式检测模型(MFD):[breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd)),公式检测精度获得较大提升。 + +具体说明请见:[Pix2Text V1.1.1 发布,带来更好的数学公式检测模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-v1.1.1)。 + + +## Update 2024.06.17:**V1.1.0.7** Released + +Major changes: + +* adapted with cnstd>=1.2.4, thanks to [@g1y5x3](https://github.com/g1y5x3) . + +主要变更: + +* 适配 cnstd>=1.2.4 ,感谢 [@g1y5x3](https://github.com/g1y5x3) 。 + +## Update 2024.06.04:**V1.1.0.6** Released + +Major changes: + +* Fix: The Text OCR incorrectly carried over the configuration from previous calls when it was called multiple times. + +主要变更: + +* 修复 bug:Text OCR 多次调用时错误沿用了之前的配置信息。 + +## Update 2024.05.27:**V1.1.0.5** Released + +Major changes: + +* Fixed bugs such as that in `._parse_remaining`. + +主要变更: + +* 修复 `._parse_remaining` 等 bug。 + +## Update 2024.05.20:**V1.1.0.4** Released + +Major changes: + +* set `table_as_image` as `True` if `self.table_ocr` is not available. +* fix typo: https://github.com/breezedeus/Pix2Text/pull/108 . Thanks to [@billvsme](https://github.com/billvsme). + +主要变更: + +* 如果 `self.table_ocr` 不可用,将 `table_as_image` 设置为 `True`。 +* 修复拼写错误:https://github.com/breezedeus/Pix2Text/pull/108 。感谢 [@billvsme](https://github.com/billvsme)。 + +## Update 2024.05.19:**V1.1.0.3** Released + +Major changes: + +* A new paid model, `mfr-plus`, has been added, which offers better recognition for multi-line formulas. +* When recognizing only English, CnOCR does not output Chinese. +* Bugs have been fixed. + +主要变更: + +* 加入新的付费模型:`mfr-plus`,对多行公式的识别效果更好。 +* 在只识别英文时,CnOCR 不输出中文。 +* 修复 bugs。 + + +## Update 2024.05.10:**V1.1.0.2** Released + +Major changes: + +* Fixed the error caused by empty lines in `merge_line_texts`. + +主要变更: + +* 修复 `merge_line_texts` 中空行导致的错误。 + + +## Update 2024.04.30:**V1.1.0.1** Released + +Major changes: + +* Fix the exception occurring when saving files on Windows. 
+ +主要变更: + +* 修复 Windows 下存储文件时出现的异常。 + + +## Update 2024.04.28:**V1.1** Released + +Major changes: + +* Added layout analysis and table recognition models, supporting the conversion of images with complex layouts into Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples_en/). +* Added support for converting entire PDF files to Markdown format. See examples: [Pix2Text Online Documentation / Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples_en/). +* Enhanced the interface with more features, including adjustments to existing interface parameters. +* Launched the [Pix2Text Online Documentation](https://pix2text.readthedocs.io). + +主要变更: + +* 加入了版面分析和表格识别模型,支持把复杂排版的图片转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples/)。 +* 支持把整个 PDF 文件转换为 Markdown 格式,示例见:[Pix2Text 在线文档/Examples](https://pix2text.readthedocs.io/zh-cn/latest/examples/)。 +* 加入了更丰富的接口,已有接口的参数也有所调整。 +* 上线了 [Pix2Text 在线文档](https://pix2text.readthedocs.io)。 + + +## Update 2024.03.30:**V1.0.2.3** Released + +Major changes: + +* Fixed the issue caused by `merge_line_texts`, see details at: https://github.com/breezedeus/Pix2Text/issues/84. +* Optimized the post-processing logic to handle some abnormal sequences. + +主要变更: + +* 修复 `merge_line_texts` 带来的错误,具体见:https://github.com/breezedeus/Pix2Text/issues/84 。 +* 优化了后处理逻辑,处理部分不正常的序列。 + +## Update 2024.03.18:**V1.0.2.2** Released + +Major changes: + +* The previously used `output_logits` argument is incompatible with transformers < 4.38.0, replaced by the `output_scores` argument. https://github.com/breezedeus/Pix2Text/issues/81 +* Fixed a bug in `serve.py` that was not compatible with the new pix2text version. + +主要变更: + +* 之前使用的 `output_logits` 参数不兼容 transformers < 4.38.0,换为 `output_scores` 参数。 https://github.com/breezedeus/Pix2Text/issues/81 +* 修复 `serve.py` 中未兼容新版接口的 bug。 + +## Update 2024.03.15:**V1.0.2.1** Released + +Major Changes: + +* Fixed mishandling of LaTeX expressions during post-processing, such as replacing `\rightarrow` with `arrow`. +* Added `rec_config` parameter to `.recognize_text()` and `.recognize_formula()` methods for passing additional parameters for recognition. + +主要变更: + +* 修复对 LaTeX 表达式进行后处理时引入的误操作,如 `\rightarrow` 被替换为 `arrow`。 +* 对 `.recognize_text()` 和 `.recognize_formula()` 加入了 `rec_config` 参数,以便传入用于识别的额外参数。 + +## Update 2024.03.14:**V1.0.2** Released + +Major Changes: + +* Optimized the recognition process, improving the recognition of boundary punctuation that may have been missed before. +* Enhanced the LaTeX recognition results by restoring the formula tags to the formulas. +* Adjusted the output format of the recognition results, adding the `return_text` parameter to control whether to return only text or more detailed information. When returning more detailed information, confidence score `score` and position information `position` will also be provided. Thanks to [@hiroi-sora](https://github.com/hiroi-sora) for the suggestion: https://github.com/breezedeus/Pix2Text/issues/67. 
+ +主要变更: + +* 优化了识别的逻辑,以前可能漏识的边界标点现在可以比较好的识别。 +* 对 Latex 识别结果进行了优化,把公式的 tag 还原到公式中。 +* 调整了识别结果的输出格式,增加了参数 `return_text` 来控制结果是只返回文本还是更丰富的信息。当返回更丰富信息时,会返回置信度 `score` 以及位置信息 `position`。感谢 [@hiroi-sora](https://github.com/hiroi-sora) 的建议:https://github.com/breezedeus/Pix2Text/issues/67 。 + +## Update 2024.03.03:发布 **V1.0.1** + +主要变更: + +* 修复在 CUDA 环境下使用 `LatexOCR` 时出现的错误,具体见:https://github.com/breezedeus/Pix2Text/issues/65#issuecomment-1973037910 ,感谢 [@MSZ-006NOC](https://github.com/MSZ-006NOC)。 + + +## Update 2024.02.26:发布 **V1.0** + +主要变更: + +* 数学公式识别(MFR)模型使用新架构,在新的数据集上训练,获得了 SOTA 的精度。具体说明请见:[Pix2Text V1.0 新版发布:最好的开源公式识别模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-v1.0)。 + + +## Update 2024.01.10:发布 **V0.3** + +主要变更: + +* 支持识别 **`80+` 种语言**,详细语言列表见 [支持的语言列表](./README_cn.md#支持的语言列表); + +* 模型自动下载增加国内站点; + +* 优化对检测 boxes 的合并逻辑。 + + + +## Update 2023.12.21:发布 **V0.2.3.3** + +主要变更: + +* fix: bugfixed from [@hiroi-sora](https://github.com/hiroi-sora) , thanks much. + + + +## Update 2023.09.10:发布 **V0.2.3.2** + +主要变更: +* fix: 去掉 `consts.py` 无用的 `CATEGORY_MAPPINGS`。 + +## Update 2023.07.14:发布 **V0.2.3.1** + +主要变更: +* 修复了 `self.recognize_by_clf` 返回结果中不包含 `line_number` 字段导致 `merge_line_texts` 报错的bug。 + +## Update 2023.07.03:发布 **V0.2.3** + +主要变更: +* 优化了对检测出的boxes的排序逻辑,以及对混合图片的处理逻辑,使得最终识别效果更符合直觉。具体参考:[Pix2Text 新版公式识别模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-20230702) 。 +* 修复了模型文件自动下载的功能。HuggingFace似乎对下载文件的逻辑做了调整,导致之前版本的自动下载失败,当前版本已修复。但由于HuggingFace国内被墙,国内下载仍需 **梯子(VPN)**。 +* 更新了各个依赖包的版本号。 + + +## Update 2023.06.20:发布新版 MFD 模型 + +主要变更: +* 基于新标注的数据,重新训练了 **MFD YoloV7** 模型,目前新模型已部署到 [P2T网页版](https://p2t.breezedeus.com) 。具体说明见:[Pix2Text (P2T) 新版公式检测模型 | Breezedeus.com](https://www.breezedeus.com/article/p2t-mfd-20230613) 。 +* 之前的 MFD YoloV7 模型已开放给星球会员下载,具体说明见:[P2T YoloV7 数学公式检测模型开放给星球会员下载 | Breezedeus.com](https://www.breezedeus.com/article/p2t-yolov7-for-zsxq-20230619) 。 + + +## Update 2023.02.19:发布 **V0.2.2.1** + +主要变更: +* 修复bug。 + + +## Update 2023.02.19:发布 **V0.2.2** + +主要变更: +* 修复旋转框导致的识别结果错误; +* 去掉代码中不小心包含的 `breakpoint()`。 + + +## [Yanked] Update 2023.02.19:发布 **V0.2.1** + +主要变更: +* 增加后处理机制优化Latex-OCR的识别结果; +* 使用最新的 [CnSTD](https://github.com/breezedeus/cnstd) 和 [CnOCR](https://github.com/breezedeus/cnocr),它们修复了一些bug。 + +## Update 2023.02.03:发布 **V0.2** + +主要变更: +* 利用 **[CnSTD](https://github.com/breezedeus/cnstd)** 新版的**数学公式检测**(**Mathematical Formula Detection**,简称 **MFD**)能力,**P2T V0.2** 支持**识别既包含文字又包含公式的混合图片**。 + +## Update 2022.10.21:发布 V0.1.1 + +主要变更: +* Fix: remove the character which causes error on Windows + +## Update 2022.09.11:发布 V0.1 +* 初版发布 diff --git a/docs/buymeacoffee.md b/docs/buymeacoffee.md new file mode 100644 index 0000000000000000000000000000000000000000..bd1ea3150d11ccd5cf9a38733508c5f288f06d24 --- /dev/null +++ b/docs/buymeacoffee.md @@ -0,0 +1,35 @@ +# 给作者加油 (Sponsor the Author)🥤 + +虽然AI技术偶尔被用于作恶,但我更相信它能给人类和其他生命带来温暖。这是我创建和持续优化这些开源项目的最大动力。它们不是为了展示技术的强大,而是为了给有需要的人带来方便和帮助。通过对这些项目的捐赠,您可以和我一道让AI为更多人带来温暖和美好。 + +My unwavering love for artificial intelligence technology drives me to constantly seek new challenges and opportunities. This is why I have created these open-sourced projects, which aim not just to demonstrate technical prowess, but more importantly, to bring convenience and help to those who need it. I truly believe that these projects have the power to change lives for the better. Seeing the positive impact of my work fills me with a sense of happiness and pride that fuels my drive to continue creating and innovating. 
+ +By supporting my projects through a donation, you can be a part of this journey and help me bring more warmth and humanity to the world of AI. + + +## 1. 知识星球 + +欢迎加入**知识星球** **[P2T/CnOCR/CnSTD私享群](https://t.zsxq.com/FEYZRJQ)**。**知识星球私享群**会陆续发布一些 CnOCR/CnSTD/P2T 相关的私有资料。 +关于星球会员享受福利的更详细说明请参考:[知识星球 | Breezedeus.com](https://www.breezedeus.com/article/zsxq)。 + +
+![知识星球二维码](https://cnocr.readthedocs.io/zh-cn/stable/cnocr-zsxq.jpeg){: style="width:280px"} +
+ + +## 2. 支付宝打赏 (Alipay reward) + +通过**支付宝**给作者打赏。 +Give the author a reward through Alipay. + +
+![支付宝收款码](https://cnocr.readthedocs.io/zh-cn/stable/cnocr-zfb.jpg){: style="width:280px"} +
+ + +## 3. Buy me a Coffee +If you are not in mainland China, you can also support the author through: + +
+Buy Me A Coffee +
diff --git a/docs/command.md b/docs/command.md new file mode 100644 index 0000000000000000000000000000000000000000..396acb8917d199801b2f216687449dba47c06250 --- /dev/null +++ b/docs/command.md @@ -0,0 +1,150 @@ +# 脚本工具 + +Python 包 **pix2text** 自带了命令行工具 `p2t`,[安装](install.md) 后即可使用。`p2t` 包含了以下几个子命令。 + +## 预测 + +使用命令 **`p2t predict`** 预测单个(图片或 PDF)文件或文件夹中所有图片(不支持同时预测多个 PDF 文件),以下是使用说明: + +```bash +$ p2t predict -h +Usage: p2t predict [OPTIONS] + + 使用Pix2Text(P2T)来预测图像或 PDF 文件中的文本信息 + +选项: + -l,--languages TEXT Text-OCR识别的语言代码,用逗号分隔,默认为en,ch_sim + --layout-config TEXT 布局解析器模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --mfd-config TEXT MFD模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --formula-ocr-config TEXT Latex-OCR数学公式识别模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --text-ocr-config TEXT Text-OCR识别的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --enable-formula / --disable-formula + 是否启用公式识别,默认值:启用公式 + --enable-table / --disable-table + 是否启用表格识别,默认值:启用表格 + -d, --device TEXT 选择使用`cpu`、`gpu`或指定的GPU,如`cuda:0`。默认值:cpu + --file-type [pdf|page|text_formula|formula|text] + 要处理的文件类型,'pdf'、'page'、'text_formula'、'formula'或'text'。默认值:text_formula + --resized-shape INTEGER 在处理之前将图像宽度调整为此大小。默认值:768 + -i, --img-file-or-dir TEXT 输入图像/pdf的文件路径或指定的目录。[必需] + --save-debug-res TEXT 如果设置了`save_debug_res`,则保存调试结果的目录;默认值为`None`,表示不保存 + --rec-kwargs TEXT 用于调用`.recognize()`的kwargs,以JSON字符串格式提供 + --return-text / --no-return-text + 是否仅返回文本结果,默认值:返回文本 + --auto-line-break / --no-auto-line-break + 是否自动确定是否将相邻的行结果合并为单个行结果,默认值:自动换行 + -o, --output-dir TEXT 识别文本结果的输出目录。仅在`file-type`为`pdf`或`page`时有效。默认值:output-md + --log-level TEXT 日志级别,例如`INFO`、`DEBUG`。默认值:INFO + -h, --help 显示此消息并退出。 +``` + +### 示例 1 +使用基础模型进行预测: + +```bash +p2t predict -l en,ch_sim --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug +``` + +它会把识别结果(Markdown格式)存放在 `output-md` 目录下,并把中间的解析结果存放在 `output-debug` 目录下,以便分析识别结果主要受哪个模型的影响。 +如果不需要保存中间解析结果,可以去掉 `--save-debug-res output-debug` 参数。 + +### 示例 2 + +预测时也支持使用自定义的参数或模型。例如,使用自定义的模型进行预测: + +```bash +p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --rec-kwargs '{"page_numbers": [0, 1]}' --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug +``` + + +## 开启服务 + +使用命令 **`p2t serve`** 开启一个 HTTP 服务,用于接收图片(当前不支持 PDF)并返回识别结果。 +这个 HTTP 服务是基于 FastAPI 实现的,以下是使用说明: + +```bash +$ p2t serve -h +Usage: p2t serve [OPTIONS] + + 启动HTTP服务。 + +选项: + -l, --languages TEXT Text-OCR识别的语言代码,用逗号分隔,默认为en,ch_sim + --layout-config TEXT 布局解析器模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --mfd-config TEXT MFD模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --formula-ocr-config TEXT Latex-OCR数学公式识别模型的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --text-ocr-config TEXT Text-OCR识别的配置信息,以JSON字符串格式提供。默认值:`None`,表示使用默认配置 + --enable-formula / --disable-formula + 是否启用公式识别,默认值:启用公式 + --enable-table / --disable-table + 是否启用表格识别,默认值:启用表格 + -d, --device TEXT 选择使用`cpu`、`gpu`或指定的GPU,如`cuda:0`。默认值:cpu + -o, --output-md-root-dir TEXT Markdown输出的根目录,用于存放识别文本结果。仅在`file-type`为`pdf`或`page`时有效。默认值:output-md-root + -H, --host TEXT 服务器主机 [默认值:0.0.0.0] + -p, --port INTEGER 服务器端口 [默认值:8503] + --reload 当代码发生更改时是否重新加载服务器 + --log-level TEXT 日志级别,例如`INFO`、`DEBUG`。默认值:INFO + -h, --help 显示此消息并退出。 +``` + +### 示例 1 +使用基础模型进行预测: + +```bash +p2t serve -l en,ch_sim -H 0.0.0.0 -p 8503 +``` + +### 示例 2 + 
+服务开启时也支持使用自定义的参数或模型。例如,使用自定义的模型进行预测: + +```bash +p2t serve -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' -H 0.0.0.0 -p 8503 +``` + +### 服务调用 + +#### Python +开启后可以使用以下方式调用命令(Python): + +```python +import requests + +url = 'http://0.0.0.0:8503/pix2text' + +image_fp = 'docs/examples/page2.png' +data = { + "file_type": "page", + "resized_shape": 768, + "embed_sep": " $,$ ", + "isolated_sep": "$$\n, \n$$" +} +files = { + "image": (image_fp, open(image_fp, 'rb'), 'image/jpeg') +} + +r = requests.post(url, data=data, files=files) + +outs = r.json()['results'] +out_md_dir = r.json()['output_dir'] +if isinstance(outs, str): + only_text = outs +else: + only_text = '\n'.join([out['text'] for out in outs]) +print(f'{only_text=}') +print(f'{out_md_dir=}') +``` + +#### Curl + +也可以使用 curl 调用服务: + +```bash +curl -X POST \ + -F "file_type=page" \ + -F "resized_shape=768" \ + -F "embed_sep= $,$ " \ + -F "isolated_sep=$$\n, \n$$" \ + -F "image=@docs/examples/page2.png;type=image/jpeg" \ + http://0.0.0.0:8503/pix2text +``` \ No newline at end of file diff --git a/docs/contact.md b/docs/contact.md new file mode 100644 index 0000000000000000000000000000000000000000..16fff0e81bf0f28305577a915cb3276bc8ee9079 --- /dev/null +++ b/docs/contact.md @@ -0,0 +1,37 @@ + +# 交流群 +可通过以下方式与作者 [breezedeus](https://github.com/breezedeus) 进行沟通,也欢迎反馈使用过程中遇到的问题。 + +## 一、知识星球 [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) + +作者维护 **知识星球** [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) ,欢迎加入。**知识星球私享群**会陆续发布一些 P2T/CnOCR/CnSTD 相关的私有资料。 +关于星球会员享受福利的更详细说明请参考:[知识星球 | Breezedeus.com](https://www.breezedeus.com/article/zsxq)。 + +
+![知识星球二维码](figs/zsxq-qr-code.jpg){: style="width:280px"} +
+ + +## 二、微信交流群 + +扫码加小助手为好友,备注 `p2t`,小助手会定期统一邀请大家入群: + +
+![微信交流群](figs/wx-qr-code.JPG){: style="width:270px"} +
+ +正常情况小助手会定期邀请入群,但无法保证时间。如果期望尽快得到答复,可以加入上面的知识星球 [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) 。 + + +## 三、Discord + +欢迎加入 [**Pix2Text Discord 服务器**](https://discord.gg/GgD87WM8Tf) 。 + +Welcome to join [**Pix2Text Discord Server**](https://discord.gg/GgD87WM8Tf) . + + +## 四、邮件 / Email + +**邮箱**:breezedeus AT gmail.com,看的不勤,除非其他方式联系不上。 + +Email: breezedeus AT gmail.com . diff --git a/docs/demo.md b/docs/demo.md new file mode 100644 index 0000000000000000000000000000000000000000..95b43e7f7981d0cc256eb9095efc1ed6819e48d0 --- /dev/null +++ b/docs/demo.md @@ -0,0 +1,20 @@ +## P2T 网页版 + +所有人都可以免费使用 **[P2T网页版](https://p2t.breezedeus.com)**,每人每天可以免费识别 10000 个字符,正常使用应该够用了。如果无法打开,请尝试科学上网。*请不要批量调用接口,机器资源有限,批量调用会导致其他人无法使用服务。* + +受限于机器资源,网页版当前只支持**简体中文和英文**,要尝试其他语言上的效果,请使用以下的**在线 Demo**。 + +
+![P2T网页版](https://pic2.zhimg.com/80/v2-4b48d8e9b10ac620244a5a22f98379c5_720w.webp) +
+ + +## 在线 Demo 🤗 + +也可以使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内镜像](https://hf.qhduan.com/spaces/breezedeus/Pix2Text-Demo)) 尝试 **P2T** 在不同语言上的效果。但在线 Demo 使用的硬件配置较低,速度会较慢。如果是简体中文或者英文图片,建议使用 **[P2T网页版](https://p2t.breezedeus.com)**。 + +
+![在线 Demo](https://pic3.zhimg.com/80/v2-ebe8d3d955a580a297aabcd27439604e_720w.webp) +
+ +更多说明请参考 [Pix2Text 主页](https://www.breezedeus.com/article/pix2text_cn) 。 \ No newline at end of file diff --git a/docs/examples.md b/docs/examples.md new file mode 100644 index 0000000000000000000000000000000000000000..1d37fb4f0a13c4dfa305a912fb1d55cdeac9feed --- /dev/null +++ b/docs/examples.md @@ -0,0 +1,221 @@ +
+ +[English](examples_en.md) | 中文 + +
+ +# 示例 +## 识别 PDF 文件,返回其 Markdown 格式 + +对于 PDF 文件,可以使用函数 `.recognize_pdf()` 对整个文件或者指定页进行识别,并把结果输出为 Markdown 文件。如针对以下 PDF 文件 ([examples/test-doc.pdf](examples/test-doc.pdf)), +调用方式如下: + +```python +from pix2text import Pix2Text + +img_fp = './examples/test-doc.pdf' +p2t = Pix2Text.from_config() +doc = p2t.recognize_pdf(img_fp, page_numbers=[0, 1]) +doc.to_markdown('output-md') # 导出的 Markdown 信息保存在 output-md 目录中 +``` + +也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFD + MFR + CnOCR 三个付费模型)进行识别: + +```bash +p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --rec-kwargs '{"page_numbers": [0, 1]}' --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug +``` + +识别结果见 [output-md/output.md](output-md/output.md)。 + +
+ +> 如果期望导出 Markdown 之外的其他格式,如 Word、HTML、PDF 等,推荐使用工具 [Pandoc](https://pandoc.org) 对 Markdown 结果进行转换即可。 + +## 识别带有复杂排版的图片 +可以使用函数 `.recognize_page()` 识别图片中的文字和数学公式。如针对以下图片 ([examples/page2.png](examples/page2.png)): + +
+![Page-image](examples/page2.png){: style="width:600px"} +
+
+调用方式如下:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = './examples/page2.png'
+p2t = Pix2Text.from_config()
+page = p2t.recognize_page(img_fp)
+page.to_markdown('output-page')  # 导出的 Markdown 信息保存在 output-page 目录中
+```
+
+也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFD + MFR + CnOCR 三个付费模型)进行识别:
+
+```bash
+p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type page -i docs/examples/page2.png -o output-page --save-debug-res output-debug-page
+```
+
+识别结果和 [output-md/output.md](output-md/output.md) 类似。
+
+## 识别既有公式又有文本的段落图片
+
+对于既有公式又有文本的段落图片,识别时不需要使用版面分析模型。
+可以使用函数 `.recognize_text_formula()` 识别图片中的文字和数学公式。如针对以下图片 ([examples/en1.jpg](examples/en1.jpg)):
+
+![English-mixed-image](examples/en1.jpg){: style="width:600px"} +
+
+调用方式如下:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = './examples/en1.jpg'
+p2t = Pix2Text.from_config()
+outs = p2t.recognize_text_formula(img_fp, resized_shape=768, return_text=True)
+print(outs)
+```
+
+当 `return_text=True` 时,返回结果 `outs` 为合并后的文本字符串;当 `return_text=False` 时,返回包含详细信息的 `dict` 列表(每个检测框对应一个 `dict`),其中 key `position` 表示 Box 位置信息,`type` 表示类别信息,而 `text` 表示识别的文本。具体说明见[接口说明](https://pix2text.readthedocs.io/zh-cn/latest/pix2text/pix_to_text/)。
+
+也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFD + MFR + CnOCR 三个付费模型)进行识别:
+
+```bash
+p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
+```
+
+或者使用免费开源模型进行识别:
+
+```bash
+p2t predict -l en,ch_sim --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
+```
+
+## 识别纯公式图片
+
+对于只包含数学公式的图片,使用函数 `.recognize_formula()` 可以把数学公式识别为 LaTeX 表达式。如针对以下图片 ([examples/math-formula-42.png](examples/math-formula-42.png)):
+
+![Pure-Math-Formula-image](examples/math-formula-42.png){: style="width:300px"} +
+ + +调用方式如下: + +```python +from pix2text import Pix2Text + +img_fp = './examples/math-formula-42.png' +p2t = Pix2Text.from_config() +outs = p2t.recognize_formula(img_fp) +print(outs) +``` + +返回结果为字符串,即对应的 LaTeX 表达式。具体说明见[说明](usage.md)。 + +也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(MFR 一个付费模型)进行识别: + +```bash +p2t predict -l en,ch_sim --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --file-type formula -i docs/examples/math-formula-42.png +``` + +或者使用免费开源模型进行识别: + +```bash +p2t predict -l en,ch_sim --file-type formula -i docs/examples/math-formula-42.png +``` + +## 识别纯文字图片 + +对于只包含文字不包含数学公式的图片,使用函数 `.recognize_text()` 可以识别出图片中的文字。此时 Pix2Text 相当于一般的文字 OCR 引擎。如针对以下图片 ([examples/general.jpg](examples/general.jpg)): + +
+![Scene-Text](examples/general.jpg){: style="width:400px"} +
+ + +调用方式如下: + +```python +from pix2text import Pix2Text + +img_fp = './examples/general.jpg' +p2t = Pix2Text.from_config() +outs = p2t.recognize_text(img_fp) +print(outs) +``` + +返回结果为字符串,即对应的文字序列。具体说明见[接口说明](https://pix2text.readthedocs.io/zh-cn/latest/pix2text/pix_to_text/)。 + +也可以使用命令行完成一样的功能,如下面命令使用了付费版模型(CnOCR 一个付费模型)进行识别: + +```bash +p2t predict -l en,ch_sim --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg +``` + +或者使用免费开源模型进行识别: + +```bash +p2t predict -l en,ch_sim --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg +``` + + +## 针对不同语言 + +### 英文 + +**识别效果**: + +![Pix2Text 识别英文](figs/output-en.jpg) + +**识别命令**: + +```bash +p2t predict -l en --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg +``` + +### 简体中文 + +**识别效果**: + +![Pix2Text 识别简体中文](figs/output-ch_sim.jpg) + +**识别命令**: + +```bash +p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/mixed.jpg --save-debug-res out-debug-mixed.jpg +``` + +### 繁体中文 + +**识别效果**: + +![Pix2Text 识别繁体中文](figs/output-ch_tra.jpg) + +**识别命令**: + +```bash +p2t predict -l en,ch_tra --mfd-config '{"model_name": "mfd-pro", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/ch_tra.jpg --save-debug-res out-debug-tra.jpg +``` + +> 注意 ⚠️ :请通过以下命令安装 pix2text 的多语言版本: +> ```bash +> pip install pix2text[multilingual] +> ``` + + +### 越南语 +**识别效果**: + +![Pix2Text 识别越南语](figs/output-vietnamese.jpg) + +**识别命令**: + +```bash +p2t predict -l en,vi --mfd-config '{"model_name": "mfd-pro", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro","model_backend":"onnx"}' --resized-shape 608 --no-auto-line-break --file-type text_formula -i docs/examples/vietnamese.jpg --save-debug-res out-debug-vi.jpg +``` + +> 注意 ⚠️ :请通过以下命令安装 pix2text 的多语言版本: +> ```bash +> pip install pix2text[multilingual] +> ``` diff --git a/docs/examples/test-doc.pdf b/docs/examples/test-doc.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9deeb4c32963c21df2d3b9349a4d1cf71ba8af87 --- /dev/null +++ b/docs/examples/test-doc.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:746024d672224466f2fbcc46385afe71e186b3d6542ae4c7132f7fd9aac36ac7 +size 1631522 diff --git a/docs/examples_en.md b/docs/examples_en.md new file mode 100644 index 0000000000000000000000000000000000000000..4ddd20a7330aac9d7f626347196e9a7e3bcf6826 --- /dev/null +++ b/docs/examples_en.md @@ -0,0 +1,219 @@ +
+ +[中文](examples.md) | English + +
+ +# Examples +## Recognize PDF Files and Return Markdown Format + +For PDF files, you can use the `.recognize_pdf()` function to recognize the entire file or specific pages and output the results as a Markdown file. For example, for the following PDF file ([examples/test-doc.pdf](examples/test-doc.pdf)), +you can call the function like this: + +```python +from pix2text import Pix2Text + +img_fp = './examples/test-doc.pdf' +p2t = Pix2Text.from_config() +doc = p2t.recognize_pdf(img_fp, page_numbers=[0, 1]) +doc.to_markdown('output-md') # The exported Markdown information is saved in the output-md directory +``` + +You can also achieve the same functionality using the command line. Below is a command that uses the premium models (MFD + MFR + CnOCR) for recognition: + +```bash +p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --rec-kwargs '{"page_numbers": [0, 1]}' --resized-shape 768 --file-type pdf -i docs/examples/test-doc.pdf -o output-md --save-debug-res output-debug +``` + +The recognition result can be found in [output-md/output.md](output-md/output.md). + +
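+
+As the note below also suggests, the exported Markdown can then be converted onward with [Pandoc](https://pandoc.org). A minimal, hypothetical sketch (it assumes Pandoc is installed and on the PATH, and that the exported file is `output-md/output.md`):
+
+```python
+import subprocess
+
+# Hypothetical follow-up step: convert the exported Markdown to a Word document
+# (any Pandoc-supported target format works the same way).
+subprocess.run(
+    ['pandoc', 'output-md/output.md', '-o', 'output-md/output.docx'],
+    check=True,
+)
+```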
+ +> If you wish to export formats other than Markdown, such as Word, HTML, PDF, etc., it is recommended to use the tool [Pandoc](https://pandoc.org) to convert the Markdown result. + +## Recognize Images with Complex Layout + +You can use the `.recognize_page()` function to recognize text and mathematical formulas in images. For example, for the following image ([examples/page2.png](examples/page2.png)): + +
+![Page-image](examples/page2.png){: style="width:600px"} +
+
+You can call the function like this:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = './examples/page2.png'
+p2t = Pix2Text.from_config()
+page = p2t.recognize_page(img_fp)
+page.to_markdown('output-page')  # The exported Markdown information is saved in the output-page directory
+```
+
+You can also achieve the same functionality using the command line. Below is a command that uses the premium models (MFD + MFR + CnOCR) for recognition:
+
+```bash
+p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type page -i docs/examples/page2.png -o output-page --save-debug-res output-debug-page
+```
+
+The recognition result is similar to [output-md/output.md](output-md/output.md).
+
+
+## Recognize Paragraph Images with Both Formulas and Texts
+
+For paragraph images containing both formulas and texts, you don't need to use the layout analysis model. You can use the `.recognize_text_formula()` function to recognize both texts and mathematical formulas in the image. For example, for the following image ([examples/en1.jpg](examples/en1.jpg)):
+
+
+![English-mixed-image](examples/en1.jpg){: style="width:600px"} +
+
+You can call the function like this:
+
+```python
+from pix2text import Pix2Text, merge_line_texts
+
+img_fp = './examples/en1.jpg'
+p2t = Pix2Text.from_config()
+outs = p2t.recognize_text_formula(img_fp, resized_shape=768, return_text=True)
+print(outs)
+```
+
+The returned result `outs` is a string with the recognized content. If you set `return_text=False`, it is instead a list of dicts, where the key `position` holds the box position information, `type` the category information, and `text` the recognition result. For detailed explanations, see [API Documentation](#api-documentation).
+
+You can also achieve the same functionality using the command line. Below is a command that uses the premium models (MFD + MFR + CnOCR) for recognition:
+
+```bash
+p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
+```
+
+Or use the free open-source models for recognition:
+
+```bash
+p2t predict -l en,ch_sim --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg --save-debug-res out-debug-en1.jpg
+```
+
+## Recognize Pure Formula Images
+
+For images containing only mathematical formulas, you can use the `.recognize_formula()` function to recognize the formulas as LaTeX expressions. For example, for the following image ([examples/math-formula-42.png](examples/math-formula-42.png)):
+
+
+![Pure-Math-Formula-image](examples/math-formula-42.png){: style="width:300px"} +
+ +You can call the function like this: + +```python +from pix2text import Pix2Text + +img_fp = './examples/math-formula-42.png' +p2t = Pix2Text.from_config() +outs = p2t.recognize_formula(img_fp) +print(outs) +``` + +The returned result is a string representing the corresponding LaTeX expression. For detailed explanations, see [Usage](usage.md). + +You can also achieve the same functionality using the command line. Below is a command that uses the premium model (MFR) for recognition: + +```bash +p2t predict -l en,ch_sim --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --file-type formula -i docs/examples/math-formula-42.png +``` + +Or use the free open-source model for recognition: + +```bash +p2t predict -l en,ch_sim --file-type formula -i docs/examples/math-formula-42.png +``` + +## Recognize Pure Text Images + +For images containing only text without mathematical formulas, you can use the `.recognize_text()` function to recognize the text in the image. In this case, Pix2Text acts as a general text OCR engine. For example, for the following image ([examples/general.jpg](examples/general.jpg)): + +
+![Scene-Text](examples/general.jpg){: style="width:400px"} +
+ +You can call the function like this: + +```python +from pix2text import Pix2Text + +img_fp = './examples/general.jpg' +p2t = Pix2Text.from_config() +outs = p2t.recognize_text(img_fp) +print(outs) +``` + +The returned result is a string representing the corresponding text sequence. For detailed explanations, see [API Documentation](https://pix2text.readthedocs.io/zh-cn/latest/pix2text/pix_to_text/). + +You can also achieve the same functionality using the command line. Below is a command that uses the premium model (CnOCR) for recognition: + +```bash +p2t predict -l en,ch_sim --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg +``` + +Or use the free open-source model for recognition: + +```bash +p2t predict -l en,ch_sim --file-type text --no-return-text -i docs/examples/general.jpg --save-debug-res out-debug-general.jpg +``` + +## For Different Languages + +### English + +**Recognition Result**: + +![Pix2Text Recognizing English](figs/output-en.jpg) + +**Recognition Command**: + +```bash +p2t predict -l en --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --file-type text_formula -i docs/examples/en1.jpg +``` + +### Simplified Chinese + +**Recognition Result**: + +![Pix2Text Recognizing Simplified Chinese](figs/output-ch_sim.jpg) + +**Recognition Command**: + +```bash +p2t predict -l en,ch_sim --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --text-ocr-config '{"rec_model_name": "doc-densenet_lite_666-gru_large"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/mixed.jpg --save-debug-res out-debug-mixed.jpg +``` + +### Traditional Chinese + +**Recognition Result**: + +![Pix2Text Recognizing Traditional Chinese](figs/output-ch_tra.jpg) + +**Recognition Command**: + +```bash +p2t predict -l en,ch_tra --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --resized-shape 768 --auto-line-break --file-type text_formula -i docs/examples/ch_tra.jpg --save-debug-res out-debug-tra.jpg +``` + +> Note ⚠️: Please install the multilingual version of pix2text using the following command: +> ```bash +> pip install pix2text[multilingual] +> ``` + +### Vietnamese + +**Recognition Result**: + +![Pix2Text Recognizing Vietnamese](figs/output-vietnamese.jpg) + +**Recognition Command**: + +```bash +p2t predict -l en,vi --mfd-config '{"model_name": "mfd-pro-1.5", "model_backend": "onnx"}' --formula-ocr-config '{"model_name":"mfr-pro-1.5","model_backend":"onnx"}' --resized-shape 608 --no-auto-line-break --file-type text_formula -i docs/examples/vietnamese.jpg --save-debug-res out-debug-vi.jpg +``` + +> Note ⚠️: Please install the multilingual version of pix2text using the following command: +> ```bash +> pip install pix2text[multilingual] +> ``` \ No newline at end of file diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 0000000000000000000000000000000000000000..48ef9733506d912ab77976e2ff5caabab4854b56 --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,8 @@ +# 常见问题(FAQ) + +## Pix2Text 是免费的吗? 
+
+Pix2Text 代码和基础模型是免费的,而且是开源的。可以按需自行调整、发布或商业使用。
+
+但请注意,Pix2Text 的不同付费模型包含不同的 license,购买时请参考具体的 license 说明。
+
diff --git a/docs/figs/breezedeus.ico b/docs/figs/breezedeus.ico
new file mode 100644
index 0000000000000000000000000000000000000000..b070848947e791d69303f4dafc18a36a2ec740c1
Binary files /dev/null and b/docs/figs/breezedeus.ico differ
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..f5cae58d90c3a079049c2d13aa121280273be508
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,263 @@
+
+![Pix2Text](figs/p2t-logo.png){: style="width:180px"} +
+ +# Pix2Text (P2T) +[![Discord](https://img.shields.io/discord/1200765964434821260?label=Discord)](https://discord.gg/GgD87WM8Tf) +[![Downloads](https://static.pepy.tech/personalized-badge/pix2text?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads)](https://pepy.tech/project/pix2text) +[![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fpix2text.readthedocs.io%2Fzh-cn%2Fstable%2F&label=Visitors&countColor=%23f5c791&style=flat&labelStyle=none)](https://visitorbadge.io/status?path=https%3A%2F%2Fpix2text.readthedocs.io%2Fzh-cn%2Fstable%2F) +[![license](https://img.shields.io/github/license/breezedeus/pix2text)](./LICENSE) +[![PyPI version](https://badge.fury.io/py/pix2text.svg)](https://badge.fury.io/py/pix2text) +[![forks](https://img.shields.io/github/forks/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +[![stars](https://img.shields.io/github/stars/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +![last-release](https://img.shields.io/github/release-date/breezedeus/pix2text) +![last-commit](https://img.shields.io/github/last-commit/breezedeus/pix2text) +[![Twitter](https://img.shields.io/twitter/url?url=https%3A%2F%2Ftwitter.com%2Fbreezedeus)](https://twitter.com/breezedeus) + +
+[📖 使用](usage.md) | +[🛠️ 安装](install.md) | +[🧳 模型](models.md) | +[🛀🏻 在线Demo](demo.md) | +[💬 交流群](contact.md) + +[English](index_en.md) | 中文 +
+
+**Pix2Text (P2T)** 期望成为 **[Mathpix](https://mathpix.com/)** 的**免费开源 Python** 替代工具,目前已经可以完成 **Mathpix** 的核心功能。
+**Pix2Text (P2T) 可以识别图片中的版面、表格、图片、文字、数学公式等内容,并整合所有内容后以 Markdown 格式输出。P2T 也可以把一整个 PDF 文件(PDF 的内容可以是扫描图片或者其他任何格式)转换为 Markdown 格式。**
+
+**Pix2Text (P2T)** 整合了以下模型:
+
+- **版面分析模型**:[breezedeus/pix2text-layout-docyolo](https://huggingface.co/breezedeus/pix2text-layout-docyolo) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout-docyolo))。
+- **表格识别模型**:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
+- **文字识别引擎**:支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
+- **数学公式检测模型(MFD)**:[breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5))。基于 [CnSTD](https://github.com/breezedeus/cnstd) 实现。
+- **数学公式识别模型(MFR)**:[breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5))。
+
+其中多个模型来自其他开源作者,非常感谢他们的贡献。
+
+
+ Pix2Text Arch Flow +
+ +具体说明请参考 [可用模型](models.md)。 + + +P2T 作为Python3工具包,对于不熟悉Python的朋友不太友好,所以我们也发布了**可免费使用**的 **[P2T网页版](https://p2t.breezedeus.com)**,直接把图片丢进网页就能输出P2T的解析结果。**网页版会使用最新的模型,效果会比开源模型更好。** + +感兴趣的朋友欢迎扫码加小助手为好友,备注 `p2t`,小助手会定期统一邀请大家入群。群内会发布P2T相关工具的最新进展: + +
+ 微信群二维码 +
+ +作者也维护 **知识星球** [**P2T/CnOCR/CnSTD私享群**](https://t.zsxq.com/FEYZRJQ) ,这里面的提问会较快得到作者的回复,欢迎加入。**知识星球私享群**也会陆续发布一些P2T/CnOCR/CnSTD相关的私有资料,包括**部分未公开的模型**,**购买付费模型享优惠**,**不同应用场景的调用代码**,使用过程中遇到的难题解答等。星球也会发布P2T/OCR/STD相关的最新研究资料。 + +更多说明可见 [交流群](contact.md)。 + + +## 支持的语言列表 + +Pix2Text 的文字识别引擎支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 **[CnOCR](https://github.com/breezedeus/cnocr)** ,其他语言的识别使用的是开源 OCR 工具 **[EasyOCR](https://github.com/JaidedAI/EasyOCR)** ,感谢相关的作者们。 + +支持的**语言列表**和**语言代码**如下: +
+↓↓↓ Click to show details ↓↓↓ + +| Language | Code Name | +| ------------------- | ----------- | +| Abaza | abq | +| Adyghe | ady | +| Afrikaans | af | +| Angika | ang | +| Arabic | ar | +| Assamese | as | +| Avar | ava | +| Azerbaijani | az | +| Belarusian | be | +| Bulgarian | bg | +| Bihari | bh | +| Bhojpuri | bho | +| Bengali | bn | +| Bosnian | bs | +| Simplified Chinese | ch_sim | +| Traditional Chinese | ch_tra | +| Chechen | che | +| Czech | cs | +| Welsh | cy | +| Danish | da | +| Dargwa | dar | +| German | de | +| English | en | +| Spanish | es | +| Estonian | et | +| Persian (Farsi) | fa | +| French | fr | +| Irish | ga | +| Goan Konkani | gom | +| Hindi | hi | +| Croatian | hr | +| Hungarian | hu | +| Indonesian | id | +| Ingush | inh | +| Icelandic | is | +| Italian | it | +| Japanese | ja | +| Kabardian | kbd | +| Kannada | kn | +| Korean | ko | +| Kurdish | ku | +| Latin | la | +| Lak | lbe | +| Lezghian | lez | +| Lithuanian | lt | +| Latvian | lv | +| Magahi | mah | +| Maithili | mai | +| Maori | mi | +| Mongolian | mn | +| Marathi | mr | +| Malay | ms | +| Maltese | mt | +| Nepali | ne | +| Newari | new | +| Dutch | nl | +| Norwegian | no | +| Occitan | oc | +| Pali | pi | +| Polish | pl | +| Portuguese | pt | +| Romanian | ro | +| Russian | ru | +| Serbian (cyrillic) | rs_cyrillic | +| Serbian (latin) | rs_latin | +| Nagpuri | sck | +| Slovak | sk | +| Slovenian | sl | +| Albanian | sq | +| Swedish | sv | +| Swahili | sw | +| Tamil | ta | +| Tabassaran | tab | +| Telugu | te | +| Thai | th | +| Tajik | tjk | +| Tagalog | tl | +| Turkish | tr | +| Uyghur | ug | +| Ukranian | uk | +| Urdu | ur | +| Uzbek | uz | +| Vietnamese | vi | + +> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) . + +
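+
+如果需要识别上表中**英文、简体中文**之外的语言,除了命令行的 `-l` 参数外,也可以在初始化时通过 `text_formula` 配置中的 `languages` 指定语言组合(详见[使用说明](usage.md))。下面是一个最小示意(假设已安装多语言版本 `pix2text[multilingual]`,语言代码取自上表):
+
+```python
+from pix2text import Pix2Text
+
+# 示意:指定"英文 + 越南语"的文字识别组合(语言代码见上表)
+total_config = {'text_formula': {'languages': ('en', 'vi')}}
+p2t = Pix2Text.from_config(total_configs=total_config)
+print(p2t.recognize_text('docs/examples/vietnamese.jpg'))
+```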
+ + + +## P2T 网页版 + +所有人都可以免费使用 **[P2T网页版](https://p2t.breezedeus.com)**,每人每天可以免费识别 10000 个字符,正常使用应该够用了。*请不要批量调用接口,机器资源有限,批量调用会导致其他人无法使用服务。* + +受限于机器资源,网页版当前只支持**简体中文和英文**,要尝试其他语言上的效果,请使用以下的**在线 Demo**。 + + + +## 在线 Demo 🤗 + +也可以使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内镜像](https://hf.qhduan.com/spaces/breezedeus/Pix2Text-Demo)) 尝试 **P2T** 在不同语言上的效果。但在线 Demo 使用的硬件配置较低,速度会较慢。如果是简体中文或者英文图片,建议使用 **[P2T网页版](https://p2t.breezedeus.com)**。 + + +## 安装 + +嗯,顺利的话一行命令即可。 + +```bash +pip install pix2text +``` + +如果需要识别**英文**与**简体中文**之外的文字,请使用以下命令安装额外的包: + +```bash +pip install pix2text[multilingual] +``` + +安装速度慢的话,可以指定国内的安装源,如使用阿里云的安装源: + +```bash +pip install pix2text -i https://mirrors.aliyun.com/pypi/simple +``` + +如果是初次使用**OpenCV**,那估计安装都不会很顺利,bless。 + +**Pix2Text** 主要依赖 [**CnSTD>=1.2.4**](https://github.com/breezedeus/cnstd)、[**CnOCR>=2.3**](https://github.com/breezedeus/cnocr) ,以及 [**transformers>=4.37.0**](https://github.com/huggingface/transformers) 。如果安装过程遇到问题,也可参考它们的安装说明文档。 + +> **Warning** +> +> 如果电脑中从未安装过 `PyTorch`,`OpenCV` python包,初次安装可能会遇到不少问题,但一般都是常见问题,可以自行百度/Google解决。 + +更多说明参考 [安装说明](install.md) 。 + + +## 使用说明 + +参见:[使用说明](usage.md)。 + +## 示例 + +参见:[示例](examples.md)。 + +## 模型下载 + +参见:[模型](models.md)。 + +## 命令行工具 + +参见:[命令行工具](command.md)。 + + +## HTTP 服务 + +使用命令 **`p2t serve`** 开启一个 HTTP 服务,用于接收图片(当前不支持 PDF)并返回识别结果。 + +```bash +p2t serve -l en,ch_sim -H 0.0.0.0 -p 8503 +``` + +之后可以使用 curl 调用服务: + +```bash +curl -X POST \ + -F "file_type=page" \ + -F "resized_shape=768" \ + -F "embed_sep= $,$ " \ + -F "isolated_sep=$$\n, \n$$" \ + -F "image=@docs/examples/page2.png;type=image/jpeg" \ + http://0.0.0.0:8503/pix2text +``` + +更多说明参考 [命令说明/开启服务](command.md) 。 + +## Mac 桌面客户端 + +请参考 [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) 安装 Pix2Text 的 MacOS 桌面客户端。 + +
+ Pix2Text Mac 客户端 +
+ + +## 给作者来杯咖啡 + +开源不易,如果此项目对您有帮助,可以考虑 [给作者加点油🥤,鼓鼓气💪🏻](buymeacoffee.md) 。 + +--- + +官方代码库: + +* **Github**: [https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text) 。 +* **Gitee**: [https://gitee.com/breezedeus/pix2text](https://gitee.com/breezedeus/pix2text) 。 + +Pix2Text (P2T) 更多信息:[https://www.breezedeus.com/article/pix2text_cn](https://www.breezedeus.com/article/pix2text_cn) 。 diff --git a/docs/index_en.md b/docs/index_en.md new file mode 100644 index 0000000000000000000000000000000000000000..03420fc6e063af30911ea2ca48468a1ca60e19e1 --- /dev/null +++ b/docs/index_en.md @@ -0,0 +1,263 @@ +
+![Pix2Text](figs/p2t-logo.png){: style="width:180px"} +
+ +# Pix2Text (P2T) +[![Discord](https://img.shields.io/discord/1200765964434821260?label=Discord)](https://discord.gg/GgD87WM8Tf) +[![Downloads](https://static.pepy.tech/personalized-badge/pix2text?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads)](https://pepy.tech/project/pix2text) +[![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fpix2text.readthedocs.io%2Fzh-cn%2Fstable%2F&label=Visitors&countColor=%23f5c791&style=flat&labelStyle=none)](https://visitorbadge.io/status?path=https%3A%2F%2Fpix2text.readthedocs.io%2Fzh-cn%2Fstable%2F) +[![license](https://img.shields.io/github/license/breezedeus/pix2text)](./LICENSE) +[![PyPI version](https://badge.fury.io/py/pix2text.svg)](https://badge.fury.io/py/pix2text) +[![forks](https://img.shields.io/github/forks/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +[![stars](https://img.shields.io/github/stars/breezedeus/pix2text)](https://github.com/breezedeus/pix2text) +![last-release](https://img.shields.io/github/release-date/breezedeus/pix2text) +![last-commit](https://img.shields.io/github/last-commit/breezedeus/pix2text) +[![Twitter](https://img.shields.io/twitter/url?url=https%3A%2F%2Ftwitter.com%2Fbreezedeus)](https://twitter.com/breezedeus) + +
+[📖 Usage](usage.md) | +[🛠️ Install](install.md) | +[🧳 Models](models.md) | +[🛀🏻 Demo](demo.md) | +[💬 Contact](contact.md) + +[中文](index.md) | English +
+ +**Pix2Text (P2T)** aims to be a **free and open-source Python** alternative to **[Mathpix](https://mathpix.com/)**, and it can already accomplish **Mathpix**'s core functionality. **Pix2Text (P2T) can recognize layouts, tables, images, text, mathematical formulas, and integrate all of these contents into Markdown format. P2T can also convert an entire PDF file (which can contain scanned images or any other format) into Markdown format.** + +**Pix2Text (P2T)** integrates the following models: + +- **Layout Analysis Model**: [breezedeus/pix2text-layout-docyolo](https://huggingface.co/breezedeus/pix2text-layout-docyolo) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-layout-docyolo)). +- **Table Recognition Model**: [breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-table-rec)). +- **Text Recognition Engine**: Supports **80+ languages** such as **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. For English and Simplified Chinese recognition, it uses the open-source OCR tool [CnOCR](https://github.com/breezedeus/cnocr), while for other languages, it uses the open-source OCR tool [EasyOCR](https://github.com/JaidedAI/EasyOCR). +- **Mathematical Formula Detection Model (MFD)**: [breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5)). Implemented based on [CnSTD](https://github.com/breezedeus/cnstd). +- **Mathematical Formula Recognition Model (MFR)**: [breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([Mirror](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5)). + +Several models are contributed by other open-source authors, and their contributions are highly appreciated. + +
+![Pix2Text Architecture Flow](figs/arch-flow.jpg) +
+
+For detailed explanations, please refer to [Models](models.md).
+
+As a Python3 toolkit, P2T may not be very user-friendly for those who are not familiar with Python. Therefore, we also provide a **[free-to-use P2T Online Web](https://p2t.breezedeus.com)**, where you can directly upload images and get P2T parsing results. The web version uses the latest models, resulting in better performance compared to the open-source models.
+
+You are welcome to join the [**Pix2Text Discord Server**](https://discord.gg/GgD87WM8Tf) if you have any questions or suggestions.
+
+If you're interested, feel free to add the WeChat assistant as a friend by scanning the QR code and mentioning `p2t`. The assistant will regularly invite everyone to join the group, where the latest developments related to P2T tools will be announced:
+
+
+![Wechat-QRCode](figs/wx-qr-code.JPG){: style="width:300px"} +
+
+The author also maintains a **Knowledge Planet** [**P2T/CnOCR/CnSTD Private Group**](https://t.zsxq.com/FEYZRJQ), where questions are answered promptly. You're welcome to join. The **Knowledge Planet private group** will also gradually release some private materials related to P2T/CnOCR/CnSTD, including **some unreleased models**, **discounts on purchasing premium models**, **code snippets for different application scenarios**, and answers to difficult problems encountered during use. The planet will also publish the latest research materials related to P2T/OCR/STD.
+
+For more contact methods, please refer to [Contact](contact.md).
+
+
+## List of Supported Languages
+
+The text recognition engine of Pix2Text supports **`80+` languages**, including **English, Simplified Chinese, Traditional Chinese, Vietnamese**, etc. Among these, **English** and **Simplified Chinese** recognition utilize the open-source OCR tool **[CnOCR](https://github.com/breezedeus/cnocr)**, while recognition for other languages employs the open-source OCR tool **[EasyOCR](https://github.com/JaidedAI/EasyOCR)**. Special thanks to the respective authors.
+
+The list of **Supported Languages** and **Language Codes** is shown below:
+
+
+↓↓↓ Click to show details ↓↓↓ + +| Language | Code Name | +| ------------------- | ----------- | +| Abaza | abq | +| Adyghe | ady | +| Afrikaans | af | +| Angika | ang | +| Arabic | ar | +| Assamese | as | +| Avar | ava | +| Azerbaijani | az | +| Belarusian | be | +| Bulgarian | bg | +| Bihari | bh | +| Bhojpuri | bho | +| Bengali | bn | +| Bosnian | bs | +| Simplified Chinese | ch_sim | +| Traditional Chinese | ch_tra | +| Chechen | che | +| Czech | cs | +| Welsh | cy | +| Danish | da | +| Dargwa | dar | +| German | de | +| English | en | +| Spanish | es | +| Estonian | et | +| Persian (Farsi) | fa | +| French | fr | +| Irish | ga | +| Goan Konkani | gom | +| Hindi | hi | +| Croatian | hr | +| Hungarian | hu | +| Indonesian | id | +| Ingush | inh | +| Icelandic | is | +| Italian | it | +| Japanese | ja | +| Kabardian | kbd | +| Kannada | kn | +| Korean | ko | +| Kurdish | ku | +| Latin | la | +| Lak | lbe | +| Lezghian | lez | +| Lithuanian | lt | +| Latvian | lv | +| Magahi | mah | +| Maithili | mai | +| Maori | mi | +| Mongolian | mn | +| Marathi | mr | +| Malay | ms | +| Maltese | mt | +| Nepali | ne | +| Newari | new | +| Dutch | nl | +| Norwegian | no | +| Occitan | oc | +| Pali | pi | +| Polish | pl | +| Portuguese | pt | +| Romanian | ro | +| Russian | ru | +| Serbian (cyrillic) | rs_cyrillic | +| Serbian (latin) | rs_latin | +| Nagpuri | sck | +| Slovak | sk | +| Slovenian | sl | +| Albanian | sq | +| Swedish | sv | +| Swahili | sw | +| Tamil | ta | +| Tabassaran | tab | +| Telugu | te | +| Thai | th | +| Tajik | tjk | +| Tagalog | tl | +| Turkish | tr | +| Uyghur | ug | +| Ukranian | uk | +| Urdu | ur | +| Uzbek | uz | +| Vietnamese | vi | + + +> Ref: [Supported Languages](https://www.jaided.ai/easyocr/) . + +
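+
+For languages other than **English** and **Simplified Chinese**, besides the command line's `-l` option, the language combination can also be specified at initialization time via the `languages` key of the `text_formula` configuration (see [Usage](usage.md)). Below is a minimal sketch (it assumes the multilingual extra `pix2text[multilingual]` is installed; the language codes come from the table above):
+
+```python
+from pix2text import Pix2Text
+
+# Sketch: recognize English + Vietnamese text (language codes from the table above)
+total_config = {'text_formula': {'languages': ('en', 'vi')}}
+p2t = Pix2Text.from_config(total_configs=total_config)
+print(p2t.recognize_text('docs/examples/vietnamese.jpg'))
+```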
+
+
+## Online Service
+
+Everyone can use the **[P2T Online Service](https://p2t.breezedeus.com)** for free, with a daily limit of 10,000 characters per account, which should be sufficient for normal use. *Please refrain from bulk API calls, as machine resources are limited, and this could prevent others from accessing the service.*
+
+Due to hardware constraints, the Online Service currently only supports **Simplified Chinese** and **English**. To try the models in other languages, please use the following **Online Demo**.
+
+
+
+## Online Demo 🤗
+
+You can also try the **[Online Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)** ([Mirror](https://hf-mirror.com/spaces/breezedeus/Pix2Text-Demo)) to see the performance of **P2T** in various languages. However, the online demo operates on lower hardware specifications and may be slower. For Simplified Chinese or English images, it is recommended to use the **[P2T Online Service](https://p2t.breezedeus.com)**.
+
+
+## Install
+
+Well, one line of command is enough if it goes well.
+
+```bash
+pip install pix2text
+```
+
+If you need to recognize languages other than **English** and **Simplified Chinese**, please use the following command to install additional packages:
+
+```bash
+pip install pix2text[multilingual]
+```
+
+If the installation is slow, you can specify a domestic installation source, such as the Aliyun source:
+
+```bash
+pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
+```
+
+If this is your first time using **OpenCV**, the installation will probably not go smoothly. Bless.
+
+**Pix2Text** mainly depends on [**CnSTD>=1.2.4**](https://github.com/breezedeus/cnstd), [**CnOCR>=2.3**](https://github.com/breezedeus/cnocr), and [**transformers>=4.37.0**](https://github.com/huggingface/transformers). If you encounter problems with the installation, you can also refer to their installation documentation.
+
+> **Warning**
+>
+> If you have never installed the `PyTorch` or `OpenCV` Python packages before, you may encounter a lot of problems during the first installation, but they are usually common problems that can be solved by Baidu/Google.
+
+For more instructions, please refer to [Install](install.md).
+
+## Usage
+
+Refer to: [Usage](usage.md).
+
+## Examples
+
+Refer to: [Examples](examples.md).
+
+## Model Downloads
+
+Refer to: [Models](models.md).
+
+## Command Line Tools
+
+Refer to: [Command Line Tools](command.md).
+
+## HTTP Service
+
+To start an HTTP service for receiving images (currently does not support PDF) and returning recognition results, use the command **`p2t serve`**.
+
+```bash
+p2t serve -l en,ch_sim -H 0.0.0.0 -p 8503
+```
+
+Afterwards, you can call the service using curl:
+
+```bash
+curl -X POST \
+  -F "file_type=page" \
+  -F "resized_shape=768" \
+  -F "embed_sep= $,$ " \
+  -F "isolated_sep=$$\n, \n$$" \
+  -F "image=@docs/examples/page2.png;type=image/jpeg" \
+  http://0.0.0.0:8503/pix2text
+```
+
+For more information, refer to [Command/Starting the Service](command.md).
+
+## MacOS Desktop Application
+
+Please refer to [Pix2Text-Mac](https://github.com/breezedeus/Pix2Text-Mac) for installing the Pix2Text Desktop App for MacOS.
+
+
+ Pix2Text Mac App +
+
+
+## A cup of coffee for the author
+
+It is not easy to maintain and evolve the project, so if it is helpful to you, please consider [offering the author a cup of coffee 🥤](https://www.breezedeus.com/buy-me-coffee).
+
+---
+
+Official code base: [https://github.com/breezedeus/pix2text](https://github.com/breezedeus/pix2text). Please cite it properly.
+
+For more information on Pix2Text (P2T), visit: [https://www.breezedeus.com/article/pix2text](https://www.breezedeus.com/article/pix2text).
diff --git a/docs/install.md b/docs/install.md
new file mode 100644
index 0000000000000000000000000000000000000000..507b4013a52f8c9738ffba85f45c1ee1ce1a822a
--- /dev/null
+++ b/docs/install.md
@@ -0,0 +1,49 @@
+# 安装
+
+## pip 安装
+
+嗯,顺利的话一行命令即可。
+
+```bash
+pip install pix2text
+```
+
+### 其他语言支持
+如果需要识别**英文**与**简体中文**之外的文字,请使用以下命令安装额外的包:
+
+```bash
+pip install pix2text[multilingual]
+```
+
+### 使用 LLM/VLM API 接口
+
+如果需要使用 **LLM/VLM** API 接口,请使用以下命令安装额外的包:
+
+```bash
+pip install pix2text[vlm]
+```
+
+### 国内安装源
+安装速度慢的话,可以指定国内的安装源,如使用阿里云的安装源:
+
+```bash
+pip install pix2text -i https://mirrors.aliyun.com/pypi/simple
+```
+
+如果是初次使用**OpenCV**,那估计安装都不会很顺利,bless。
+
+**Pix2Text** 主要依赖 [**CnSTD>=1.2.4**](https://github.com/breezedeus/cnstd)、[**CnOCR>=2.3**](https://github.com/breezedeus/cnocr) ,以及 [**transformers>=4.37.0**](https://github.com/huggingface/transformers) 。如果安装过程遇到问题,也可参考它们的安装说明文档。
+
+> **Warning**
+>
+> 如果电脑中从未安装过 `PyTorch`,`OpenCV` python包,初次安装可能会遇到不少问题,但一般都是常见问题,可以自行百度/Google解决。
+
+
+## GPU 环境使用 ONNX 模型
+
+默认情况下安装的 **ONNX** 包是 **`onnxruntime`**,它只能在 `CPU` 上运行。如果需要在 `GPU` 环境使用 **ONNX** 模型,需要卸载此包,然后安装包 **`onnxruntime-gpu`** 。
+
+```bash
+pip uninstall onnxruntime
+pip install onnxruntime-gpu
+```
diff --git a/docs/models.md b/docs/models.md
new file mode 100644
index 0000000000000000000000000000000000000000..ecc703980b6d4fb9bab12d57092ae5ce5e5b63c1
--- /dev/null
+++ b/docs/models.md
@@ -0,0 +1,99 @@
+# 各种模型
+
+**Pix2Text (P2T)** 整合了很多不同功能的模型,主要包括:
+
+- **版面分析模型**:[breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout))。
+- **表格识别模型**:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
+- **文字识别引擎**:支持 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
+- **数学公式检测模型(MFD)**:[breezedeus/pix2text-mfd-1.5](https://huggingface.co/breezedeus/pix2text-mfd-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd-1.5))。基于 [CnSTD](https://github.com/breezedeus/cnstd) 实现。
+- **数学公式识别模型(MFR)**:[breezedeus/pix2text-mfr-1.5](https://huggingface.co/breezedeus/pix2text-mfr-1.5) ([国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr-1.5))。
+
+其中多个模型来自其他开源作者,非常感谢他们的贡献。
+
+这些模型正常情况下都会自动下载(可能会比较慢,只要不报错请勿手动打断下载过程),但如果下载失败,可以参考以下的说明手动下载。
+
+除基础模型外,Pix2Text 还提供了以下模型的高级付费版:
+
+- MFD 和 MFR 付费模型:具体参考 [P2T详细资料 | Breezedeus.com](https://www.breezedeus.com/article/pix2text_cn)。
+- CnOCR 付费模型:具体参考 [CnOCR详细资料 | Breezedeus.com](https://www.breezedeus.com/article/cnocr)。
+
+具体说明请见本页面末尾。
+
+下面的说明主要针对免费的基础模型。
+
+## 版面分析模型
+**版面分析模型** 下载地址:[breezedeus/pix2text-layout](https://huggingface.co/breezedeus/pix2text-layout) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-layout))。
+把这里面的所有文件都下载到 `~/.pix2text/1.1/layout-parser` (Windows 系统放在 
`C:\Users\<username>\AppData\Roaming\pix2text\1.1\layout-parser`)目录下即可,目录不存在的话请自己创建。
+
+> 注:上面路径的 `1.1` 是 pix2text 的版本号,`1.1.*` 都对应 `1.1`。如果是其他版本请自行替换。
+
+## 表格识别模型
+**表格识别模型** 下载地址:[breezedeus/pix2text-table-rec](https://huggingface.co/breezedeus/pix2text-table-rec) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-table-rec))。
+把这里面的所有文件都下载到 `~/.pix2text/1.1/table-rec` (Windows 系统放在 `C:\Users\<username>\AppData\Roaming\pix2text\1.1\table-rec`)目录下即可,目录不存在的话请自己创建。
+
+> 注:上面路径的 `1.1` 是 pix2text 的版本号,`1.1.*` 都对应 `1.1`。如果是其他版本请自行替换。
+
+## 数学公式检测模型(MFD)
+### `pix2text >= 1.1.1`
+Pix2Text 自 **V1.1.1** 开始,**数学公式检测模型** 下载地址:[breezedeus/pix2text-mfd](https://huggingface.co/breezedeus/pix2text-mfd) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfd))。
+
+### `pix2text < 1.1.1`
+**数学公式检测模型**(MFD)来自 [CnSTD](https://github.com/breezedeus/cnstd) 的数学公式检测模型(MFD),请参考其代码库说明。
+
+如果系统无法自动成功下载模型文件,则需要手动从 [**cnstd-cnocr-models**](https://huggingface.co/breezedeus/cnstd-cnocr-models) ([国内镜像](https://hf-mirror.com/breezedeus/cnstd-cnocr-models))项目中下载,或者从[百度云盘](https://pan.baidu.com/s/1zDMzArCDrrXHWL0AWxwYQQ?pwd=nstd)(提取码为 `nstd`)下载对应的zip文件并把它存放于 `~/.cnstd/1.2`(Windows下为 `C:\Users\<username>\AppData\Roaming\cnstd\1.2`)目录中。
+
+## 数学公式识别模型(MFR)
+**数学公式识别模型** 下载地址:[breezedeus/pix2text-mfr](https://huggingface.co/breezedeus/pix2text-mfr) (不能科学上网请使用 [国内镜像](https://hf-mirror.com/breezedeus/pix2text-mfr))。
+把这里面的所有文件都下载到 `~/.pix2text/1.1/mfr-1.5-onnx` (Windows 系统放在 `C:\Users\<username>\AppData\Roaming\pix2text\1.1\mfr-1.5-onnx`)目录下即可,目录不存在的话请自己创建。
+
+> 注:上面路径的 `1.1` 是 pix2text 的版本号,`1.1.*` 都对应 `1.1`。如果是其他版本请自行替换。
+
+## 文字识别引擎
+Pix2Text 的**文字识别引擎**可以识别 **`80+` 种语言**,如**英文、简体中文、繁体中文、越南语**等。其中,**英文**和**简体中文**识别使用的是开源 OCR 工具 [CnOCR](https://github.com/breezedeus/cnocr) ,其他语言的识别使用的是开源 OCR 工具 [EasyOCR](https://github.com/JaidedAI/EasyOCR) 。
+
+正常情况下,CnOCR 的模型都会自动下载。如果无法自动下载,可以参考以下说明手动下载。
+CnOCR 的开源模型都放在 [**cnstd-cnocr-models**](https://huggingface.co/breezedeus/cnstd-cnocr-models) ([国内镜像](https://hf-mirror.com/breezedeus/cnstd-cnocr-models))项目中,可免费下载使用。
+如果下载太慢,也可以从 [百度云盘](https://pan.baidu.com/s/1RhLBf8DcLnLuGLPrp89hUg?pwd=nocr) 下载, 提取码为 `nocr`。具体方法可参考 [CnOCR在线文档/使用方法](https://cnocr.readthedocs.io/zh-cn/latest/usage) 。
+
+CnOCR 中的文字检测引擎使用的是 [CnSTD](https://github.com/breezedeus/cnstd),
+如果系统无法自动成功下载模型文件,则需要手动从 [**cnstd-cnocr-models**](https://huggingface.co/breezedeus/cnstd-cnocr-models) ([国内镜像](https://hf-mirror.com/breezedeus/cnstd-cnocr-models))项目中下载,或者从[百度云盘](https://pan.baidu.com/s/1zDMzArCDrrXHWL0AWxwYQQ?pwd=nstd)(提取码为 `nstd`)下载对应的zip文件并把它存放于 `~/.cnstd/1.2`(Windows下为 `C:\Users\<username>\AppData\Roaming\cnstd\1.2`)目录中。
+
+关于 CnOCR 模型的更多信息请参考 [CnOCR在线文档/可用模型](https://cnocr.readthedocs.io/zh-cn/latest/models)。
+
+CnOCR 也提供**高级版的付费模型**,具体参考本文末尾的说明。
+
+- CnOCR 付费模型:具体参考 [CnOCR详细资料 | Breezedeus.com](https://www.breezedeus.com/article/cnocr)。
+
+
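+
+如果上述模型都需要手动下载,除了在网页端逐个下载外,也可以借助 `huggingface_hub` 在代码里完成。下面是一个示意(假设已 `pip install huggingface_hub`;`repo_id` 和存放目录请按上文各节的约定替换):
+
+```python
+import os
+from huggingface_hub import snapshot_download
+
+# 示意:把 MFR 模型文件下载到约定目录(目录约定见上文;
+# 不能科学上网时可先设置环境变量 HF_ENDPOINT=https://hf-mirror.com)
+snapshot_download(
+    repo_id='breezedeus/pix2text-mfr',
+    local_dir=os.path.expanduser('~/.pix2text/1.1/mfr-1.5-onnx'),
+)
+```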
+ +EasyOCR 模型下载请参考 [EasyOCR](https://github.com/JaidedAI/EasyOCR)。 + +## 高级版付费模型 + +除基础模型外,Pix2Text 还提供了以下模型的高级付费版: + +- MFD 和 MFR 付费模型:具体参考 [P2T详细资料 | Breezedeus.com](https://www.breezedeus.com/article/pix2text_cn)。 +- CnOCR 付费模型:具体参考 [CnOCR详细资料 | Breezedeus.com](https://www.breezedeus.com/article/cnocr)。 + +> 注意,付费模型包含不同的 license 版本,购买时请参考具体的产品说明。 + +建议购买前首先使用 **[在线 Demo](https://huggingface.co/spaces/breezedeus/Pix2Text-Demo)**(无法科学上网可以使用 [国内 Demo](https://hf-mirror.com/spaces/breezedeus/Pix2Text-Demo))**验证模型效果后再购买**。 + +**模型购买地址**: + +| 模型名称 | 购买地址 | 说明 +|--------------|------------------------------------------------------------|-----------------------------------------------------------------------------------| +| MFD pro 模型 | [Lemon Squeezy](https://ocr.lemonsqueezy.com) | 包含企业版和个人版,可开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) | +| MFD pro 模型 | [B站](https://mall.bilibili.com/neul-next/detailuniversal/detail.html?isMerchant=1&page=detailuniversal_detail&saleType=10&itemsId=11883911&loadingShow=1&noTitleBar=1&msource=merchant_share) | 仅包含个人版,不可商用,不能开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) | +| MFR pro 模型 | [Lemon Squeezy](https://ocr.lemonsqueezy.com) | 包含企业版和个人版,可开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) | +| MFR pro 模型 | [B站](https://mall.bilibili.com/neul-next/detailuniversal/detail.html?isMerchant=1&page=detailuniversal_detail&saleType=10&itemsId=11884166&loadingShow=1&noTitleBar=1&msource=merchant_share) | 仅包含个人版,不可商用,不能开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) | +| CnOCR pro 模型 | [Lemon Squeezy](https://ocr.lemonsqueezy.com) | 包含企业版和个人版,可开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) 和 [CnOCR详细资料](https://www.breezedeus.com/article/cnocr) | +| CnOCR pro 模型 | [B站](https://mall.bilibili.com/neul-next/detailuniversal/detail.html?isMerchant=1&page=detailuniversal_detail&saleType=10&itemsId=11884138&loadingShow=1&noTitleBar=1&msource=merchant_share) | 仅包含个人版,不可商用,不能开发票。具体说明见:[P2T详细资料](https://www.breezedeus.com/article/pix2text_cn) 和 [CnOCR详细资料](https://www.breezedeus.com/article/cnocr) | + +购买过程遇到问题可以扫码加小助手为好友进行沟通,备注 `p2t`,小助手会尽快答复: + +
+![微信交流群](https://huggingface.co/datasets/breezedeus/cnocr-wx-qr-code/resolve/main/wx-qr-code.JPG){: style="width:270px"} +
+ +更多联系方式见 [交流群](contact.md)。 \ No newline at end of file diff --git a/docs/pix2text/latex_ocr.md b/docs/pix2text/latex_ocr.md new file mode 100644 index 0000000000000000000000000000000000000000..da75cbe9b3f1f5b54598a11c29e6397b7a5ee4ac --- /dev/null +++ b/docs/pix2text/latex_ocr.md @@ -0,0 +1 @@ +:::pix2text.latex_ocr diff --git a/docs/pix2text/pix_to_text.md b/docs/pix2text/pix_to_text.md new file mode 100644 index 0000000000000000000000000000000000000000..878b6ff24c4f0c827b6cf41430c3aef87633d0c5 --- /dev/null +++ b/docs/pix2text/pix_to_text.md @@ -0,0 +1 @@ +:::pix2text.pix_to_text diff --git a/docs/pix2text/table_ocr.md b/docs/pix2text/table_ocr.md new file mode 100644 index 0000000000000000000000000000000000000000..c78c6bd2d9d3ef1d7ee2cb87e735a1ac64b6700a --- /dev/null +++ b/docs/pix2text/table_ocr.md @@ -0,0 +1 @@ +:::pix2text.table_ocr diff --git a/docs/pix2text/text_formula_ocr.md b/docs/pix2text/text_formula_ocr.md new file mode 100644 index 0000000000000000000000000000000000000000..5efdb41c60086202bd04ff4b53f25a5686b37736 --- /dev/null +++ b/docs/pix2text/text_formula_ocr.md @@ -0,0 +1 @@ +:::pix2text.text_formula_ocr diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b15e66ff06869841203bf64630f7e01c49e047f --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,387 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --output-file=requirements.txt requirements.in +# +#--index-url https://mirrors.aliyun.com/pypi/simple +#--extra-index-url https://pypi.tuna.tsinghua.edu.cn/simple +#--extra-index-url https://pypi.org/simple + +aiohttp==3.9.3 + # via + # datasets + # fsspec +aiosignal==1.3.1 + # via aiohttp +appdirs==1.4.4 + # via wandb +async-timeout==4.0.3 + # via aiohttp +attrs==23.2.0 + # via aiohttp +certifi==2024.2.2 + # via + # requests + # sentry-sdk +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via + # -r requirements.in + # cnocr + # cnstd + # wandb +cnocr[ort-cpu]==2.3.0.2 + # via + # -r requirements.in + # cnocr +cnstd==1.2.4.1 + # via + # -r requirements.in + # cnocr +coloredlogs==15.0.1 + # via + # onnxruntime + # optimum +contourpy==1.2.0 + # via matplotlib +cycler==0.12.1 + # via matplotlib +datasets==2.17.0 + # via + # evaluate + # optimum +dill==0.3.8 + # via + # datasets + # evaluate + # multiprocess +docker-pycreds==0.4.0 + # via wandb +easyocr==1.7.1 + # via -r requirements.in +evaluate==0.4.1 + # via optimum +filelock==3.13.1 + # via + # datasets + # huggingface-hub + # torch + # transformers +flatbuffers==23.5.26 + # via onnxruntime +fonttools==4.49.0 + # via matplotlib +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +fsspec[http]==2023.10.0 + # via + # datasets + # evaluate + # huggingface-hub + # pytorch-lightning + # torch +gitdb==4.0.11 + # via gitpython +gitpython==3.1.42 + # via wandb +huggingface-hub==0.20.3 + # via + # cnstd + # datasets + # evaluate + # optimum + # tokenizers + # transformers +humanfriendly==10.0 + # via coloredlogs +idna==3.6 + # via + # requests + # yarl +imageio==2.34.0 + # via scikit-image +importlib-resources==6.1.1 + # via matplotlib +jinja2==3.0.3 + # via torch +kiwisolver==1.4.5 + # via matplotlib +lazy-loader==0.3 + # via scikit-image +lightning-utilities==0.10.1 + # via + # pytorch-lightning + # torchmetrics +markupsafe==2.1.5 + # via jinja2 +matplotlib==3.8.3 + # via + # cnstd + # seaborn +mpmath==1.3.0 + # via sympy +multidict==6.0.5 + # via + # aiohttp + # yarl 
+multiprocess==0.70.16 + # via + # datasets + # evaluate +networkx==3.2.1 + # via + # scikit-image + # torch +ninja==1.11.1.1 + # via easyocr +numpy==1.26.4 + # via + # -r requirements.in + # cnocr + # cnstd + # contourpy + # datasets + # easyocr + # evaluate + # imageio + # matplotlib + # onnx + # onnxruntime + # opencv-python + # opencv-python-headless + # optimum + # pandas + # pyarrow + # pytorch-lightning + # scikit-image + # scipy + # seaborn + # shapely + # tifffile + # torchmetrics + # torchvision + # transformers +onnx==1.15.0 + # via + # cnocr + # cnstd + # optimum +onnxruntime==1.17.0 + # via + # cnocr + # optimum +opencv-python==4.9.0.80 + # via + # -r requirements.in + # cnstd +opencv-python-headless==4.9.0.80 + # via easyocr +optimum[onnxruntime]==1.16.2 + # via -r requirements.in +packaging==23.2 + # via + # datasets + # evaluate + # huggingface-hub + # lightning-utilities + # matplotlib + # onnxruntime + # optimum + # pytorch-lightning + # scikit-image + # torchmetrics + # transformers +pandas==2.2.0 + # via + # cnstd + # datasets + # evaluate + # seaborn +pillow==10.2.0 + # via + # -r requirements.in + # cnocr + # cnstd + # easyocr + # imageio + # matplotlib + # scikit-image + # torchvision +polygon3==3.0.9.1 + # via cnstd +protobuf==4.25.3 + # via + # onnx + # onnxruntime + # optimum + # transformers + # wandb +psutil==5.9.8 + # via wandb +pyarrow==15.0.0 + # via datasets +pyarrow-hotfix==0.6 + # via datasets +pyclipper==1.3.0.post5 + # via + # cnstd + # easyocr +pymupdf==1.24.1 + # via -r requirements.in +pymupdfb==1.24.1 + # via pymupdf +pyparsing==3.1.1 + # via matplotlib +pyspellchecker==0.8.1 + # via -r requirements.in +python-bidi==0.4.2 + # via easyocr +python-dateutil==2.8.2 + # via + # matplotlib + # pandas +pytorch-lightning==2.2.0.post0 + # via + # cnocr + # cnstd +pytz==2024.1 + # via pandas +pyyaml==6.0.1 + # via + # cnstd + # datasets + # easyocr + # huggingface-hub + # pytorch-lightning + # transformers + # wandb +regex==2023.12.25 + # via transformers +requests==2.31.0 + # via + # datasets + # evaluate + # fsspec + # huggingface-hub + # responses + # torchvision + # transformers + # wandb +responses==0.18.0 + # via evaluate +safetensors==0.4.2 + # via transformers +scikit-image==0.22.0 + # via easyocr +scipy==1.12.0 + # via + # cnstd + # easyocr + # scikit-image +seaborn==0.13.2 + # via cnstd +sentencepiece==0.1.99 + # via transformers +sentry-sdk==1.40.4 + # via wandb +setproctitle==1.3.3 + # via wandb +shapely==2.0.2 + # via + # cnstd + # easyocr +six==1.16.0 + # via + # docker-pycreds + # python-bidi + # python-dateutil +smmap==5.0.1 + # via gitdb +sympy==1.12 + # via + # onnxruntime + # optimum + # torch +tifffile==2024.2.12 + # via scikit-image +tokenizers==0.15.2 + # via transformers +torch==2.2.0 + # via + # -r requirements.in + # cnocr + # cnstd + # easyocr + # optimum + # pytorch-lightning + # torchmetrics + # torchvision +torchmetrics==1.3.1 + # via + # cnocr + # pytorch-lightning +torchvision==0.17.0 + # via + # -r requirements.in + # cnocr + # cnstd + # easyocr +tqdm==4.66.2 + # via + # -r requirements.in + # cnocr + # cnstd + # datasets + # evaluate + # huggingface-hub + # pytorch-lightning + # transformers +transformers[sentencepiece]==4.37.2 + # via + # -r requirements.in + # optimum +typing-extensions==4.9.0 + # via + # huggingface-hub + # lightning-utilities + # pytorch-lightning + # torch + # wandb +tzdata==2024.1 + # via pandas +unidecode==1.3.8 + # via cnstd +urllib3==2.2.0 + # via + # requests + # responses + # sentry-sdk 
+wandb==0.16.3
+    # via cnocr
+xxhash==3.4.1
+    # via
+    #   datasets
+    #   evaluate
+yarl==1.9.4
+    # via aiohttp
+zipp==3.17.0
+    # via importlib-resources
+
+doclayout-yolo<0.1
+litellm<2.0
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
+
+# for mkdocs
+pygments==2.11
+jinja2<3.1.0
+mkdocs==1.2.2
+mkdocs-macros-plugin==0.6.0
+mkdocs-material==7.3.0
+mkdocs-material-extensions==1.0.3
+mkdocstrings==0.16.1
diff --git a/docs/train.md b/docs/train.md
new file mode 100644
index 0000000000000000000000000000000000000000..08f3e0a35e44a66b370ab6b70107400e61722426
--- /dev/null
+++ b/docs/train.md
@@ -0,0 +1,3 @@
+# Model Train
+
+TODO
diff --git a/docs/usage.md b/docs/usage.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5d8c2a49dd9a625dc0206bc9aa3cf74208c748c
--- /dev/null
+++ b/docs/usage.md
@@ -0,0 +1,547 @@
+# Usage
+
+## 模型文件自动下载
+
+首次使用 **Pix2Text** 时,系统会**自动下载**所需的开源模型,并存于 `~/.pix2text` 目录(Windows下默认路径为 `C:\Users\<username>\AppData\Roaming\pix2text`)。
+CnOCR 和 CnSTD 中的模型分别存于 `~/.cnocr` 和 `~/.cnstd` 中(Windows 下默认路径为 `C:\Users\<username>\AppData\Roaming\cnocr` 和 `C:\Users\<username>\AppData\Roaming\cnstd`)。
+下载过程请耐心等待,无法科学上网时系统会自动尝试其他可用站点进行下载,所以可能需要等待较长时间。
+对于没有网络连接的机器,可以先把模型下载到其他机器上,然后拷贝到对应目录。
+
+如果系统无法自动成功下载模型文件,则需要手动下载模型文件,可以参考 [huggingface.co/breezedeus](https://huggingface.co/breezedeus) ([国内镜像](https://hf-mirror.com/breezedeus))自己手动下载。
+
+具体说明见 [模型下载](models.md)。
+
+
+## 初始化
+### 方法一
+
+类 [Pix2Text](pix2text/pix_to_text.md) 是识别主类,包含了多个识别函数,用于识别不同类型的 **图片** 或 **PDF文件** 中的内容。类 `Pix2Text` 的初始化函数如下:
+
+```python
+class Pix2Text(object):
+    def __init__(
+        self,
+        *,
+        layout_parser: Optional[LayoutParser] = None,
+        text_formula_ocr: Optional[TextFormulaOCR] = None,
+        table_ocr: Optional[TableOCR] = None,
+        **kwargs,
+    ):
+        """
+        Initialize the Pix2Text object.
+        Args:
+            layout_parser (LayoutParser): The layout parser object; default value is `None`, which means to create a default one
+            text_formula_ocr (TextFormulaOCR): The text and formula OCR object; default value is `None`, which means to create a default one
+            table_ocr (TableOCR): The table OCR object; default value is `None`, which means not to recognize tables
+            **kwargs (dict): Other arguments, currently not used
+        """
+```
+
+其中的几个参数含义如下:
+
+* `layout_parser`:版面分析模型对象,默认值为 `None`,表示使用默认的版面分析模型;
+* `text_formula_ocr`:文字与公式识别模型对象,默认值为 `None`,表示使用默认的文字与公式识别模型;
+* `table_ocr`:表格识别模型对象,默认值为 `None`,表示不识别表格;
+* `**kwargs`:其他参数,目前未使用。
+
+
+每个参数都有默认取值,所以可以不传入任何参数值进行初始化:`p2t = Pix2Text()`。但请注意,如果不传入任何参数值,那么只会导入默认的版面分析模型和文字与公式识别模型,而**不会导入表格识别模型**。
+
+初始化 Pix2Text 实例的更好的方法是使用以下的函数。
+
+### 方法二
+可以通过指定配置信息来初始化 `Pix2Text` 类的实例:
+
+```python
+@classmethod
+def from_config(
+    cls,
+    total_configs: Optional[dict] = None,
+    enable_formula: bool = True,
+    enable_table: bool = True,
+    device: str = None,
+    **kwargs,
+):
+    """
+    Create a Pix2Text object from the configuration.
+    Args:
+        total_configs (dict): The total configuration; default value is `None`, which means to use the default configuration. 
+ If not None, it should contain the following keys: + + * `layout`: The layout parser configuration + * `text_formula`: The TextFormulaOCR configuration + * `table`: The table OCR configuration + enable_formula (bool): Whether to enable formula recognition; default value is `True` + enable_table (bool): Whether to enable table recognition; default value is `True` + device (str): The device to run the model; optional values are 'cpu', 'gpu' or 'cuda'; + default value is `None`, which means to select the device automatically + **kwargs (dict): Other arguments + + Returns: a Pix2Text object + + """ +``` + +其中的几个参数含义如下: + +* `total_configs`:总配置,包含以下几个键值: + - `layout`:版面分析模型的配置; + - `text_formula`:文字与公式识别模型的配置; + - `table`:表格识别模型的配置; + 默认值为 `None`,表示使用默认配置。 +* `enable_formula`:是否启用公式识别,默认值为 `True`; +* `enable_table`:是否启用表格识别,默认值为 `True`; +* `device`:运行模型的设备,可选值为 `'cpu'`, `'gpu'` 或 `'cuda'`,默认值为 `None`,表示自动选择设备; +* `**kwargs`:其他参数,目前未使用。 + +这个函数的返回值是一个 `Pix2Text` 类的实例,可以直接使用这个实例进行识别。 + +推荐使用此函数初始化 Pix2Text 的实例,如:`p2t = Pix2Text.from_config()`。 + +一个包含配置信息的示例如下: + +```python +import os +from pix2text import Pix2Text + +text_formula_config = dict( + languages=('en', 'ch_sim'), # 设置识别的语言 + mfd=dict( # 声明 MFD 的初始化参数 + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + text=dict( + rec_model_name='doc-densenet_lite_666-gru_large', + rec_model_backend='onnx', + rec_model_fp=os.path.expanduser( + '~/.cnocr/2.3/doc-densenet_lite_666-gru_large/cnocr-v2.3-doc-densenet_lite_666-gru_large-epoch=005-ft-model.onnx' + # noqa + ), # 注:修改成你的模型文件所存储的路径 + ), +) +total_config = { + 'layout': {'scores_thresh': 0.45}, + 'text_formula': text_formula_config, +} +p2t = Pix2Text.from_config(total_configs=total_config) +``` + +使用 VLM API 做文字和公式识别的示例如下: + +```python +import os +from pix2text import Pix2Text + +model_name=os.getenv("GEMINI_MODEL") # "gemini/gemini-2.0-flash-lite" +api_key=os.getenv("GEMINI_API_KEY") # "" + +total_config = { + 'layout': None, + 'text_formula': { + "model_type": "VlmTextFormulaOCR", # 指定类名 + "model_name": model_name, + "api_key": api_key, + }, + "table": { + "model_type": "VlmTableOCR", # 指定类名 + "model_name": model_name, + "api_key": api_key, + }, +} +p2t = Pix2Text.from_config(total_configs=total_config) +``` +`model_name` 和 `api_key` 的取值,具体可参考 [LiteLLM 文档](https://docs.litellm.ai/docs/)。 + +更多初始化的示例请参见 [tests/test_pix2text.py](https://github.com/breezedeus/Pix2Text/blob/main/tests/test_pix2text.py)。 + +## 各种识别接口 + +类 `Pix2Text` 提供了不同的识别函数来识别不同类似的图片或者 PDF 文件内容,下面分别说明。 + + +### 1. 函数 `.recognize_pdf()` + +此函数用于识别一整个 PDF 文件中的内容。**PDF 文件的内容可以只包含图片而无文字内容**, +如示例文件 [examples/test-doc.pdf](examples/test-doc.pdf)。 +识别时,可以指定识别的页数,也可以指定识别的 PDF 文件编号。 +函数定义如下: + +```python +def recognize_pdf( + self, + pdf_fp: Union[str, Path], + pdf_number: int = 0, + pdf_id: Optional[str] = None, + page_numbers: Optional[List[int]] = None, + **kwargs, +) -> Document: + """ + recognize a pdf file + Args: + pdf_fp (Union[str, Path]): pdf file path + pdf_number (int): pdf number + pdf_id (str): pdf id + page_numbers (List[int]): page numbers to recognize; default is `None`, which means to recognize all pages + kwargs (dict): Optional keyword arguments. The same as `recognize_page` + + Returns: a Document object. Use `doc.to_markdown('output-dir')` to get the markdown output of the recognized document. 
+
+    """
+```
+
+**函数说明**:
+
+* 输入参数 `pdf_fp`:PDF 文件的路径;
+* 输入参数 `pdf_number`:PDF 文件的编号,默认值为 `0`;
+* 输入参数 `pdf_id`:PDF 文件的 ID,默认值为 `None`;
+* 输入参数 `page_numbers`:需要识别的页码列表(页码从 0 开始计数,如 `[0, 1]` 表示只识别文件的第 1、2 页内容),默认值为 `None`,表示识别所有页;
+* 输入参数 `**kwargs`:其他参数,具体说明参考下面的函数 `recognize_page()`。
+
+**返回值**:返回一个 `Document` 对象,可以使用 `doc.to_markdown('output-dir')` 来获取识别结果的 markdown 输出。
+
+**调用示例**:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = 'examples/test-doc.pdf'
+p2t = Pix2Text.from_config()
+out_md = p2t.recognize_pdf(
+    img_fp,
+    page_numbers=[0, 1],
+    table_as_image=True,
+    save_debug_res='./output-debug',
+)
+out_md.to_markdown('output-pdf-md')
+```
+
+### 2. 函数 `.recognize_page()`
+
+此函数用于识别一张包含复杂排版的页面图片中的内容。图片可以包含多列、图片、表格等内容,如示例图片 [examples/page2.png](examples/page2.png)。
+函数定义如下:
+
+```python
+def recognize_page(
+    self,
+    img: Union[str, Path, Image.Image],
+    page_number: int = 0,
+    page_id: Optional[str] = None,
+    **kwargs,
+) -> Page:
+    """
+    Analyze the layout of the image, and then recognize the information contained in each section.
+
+    Args:
+        img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()`
+        page_number (int): page number; default value is `0`
+        page_id (str): page id; default value is `None`, which means to use the `str(page_number)`
+        kwargs ():
+            * resized_shape (int): Resize the image width to this size for processing; default value is `768`
+            * mfr_batch_size (int): batch size for MFR; When running on GPU, this value is suggested to be set to greater than 1; default value is `1`
+            * embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')`
+            * isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is two-dollar signs
+            * line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break
+            * auto_line_break (bool): Automatically line break the recognized text; only effective when `return_text` is `True`; default value is `True`
+            * det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.3`
+            * det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.2`
+            * embed_ratio_threshold (float): The overlap threshold for embed formulas and text lines; default value is `0.6`.
+                When the overlap between an embed formula and a text line is greater than or equal to this threshold,
+                the embed formula and the text line are considered to be on the same line;
+                otherwise, they are considered to be on different lines.
+            * table_as_image (bool): If `True`, the table will be recognized as an image (don't parse the table content as text); default value is `False`
+            * title_contain_formula (bool): If `True`, the title of the page will be recognized as a mixed image (text and formula). If `False`, it will be recognized as a text; default value is `False`
+            * text_contain_formula (bool): If `True`, the text of the page will be recognized as a mixed image (text and formula). 
If `False`, it will be recognized as a text; default value is `True`
+            * formula_rec_kwargs (dict): generation arguments passed to formula recognizer `latex_ocr`; default value is `{}`
+            * save_debug_res (str): if `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save
+
+    Returns: a Page object. Use `page.to_markdown('output-dir')` to get the markdown output of the recognized page.
+    """
+```
+
+**函数说明**:
+
+* 输入参数 `img`:图片路径或者 `Image.Image` 对象;
+* 输入参数 `page_number`:页码,默认值为 `0`;
+* 输入参数 `page_id`:页码 ID,默认值为 `None`,此时会使用 `str(page_number)` 作为其取值;
+* kwargs:其他参数,具体说明如下:
+    - `resized_shape`:调整图片的宽度为此大小以进行处理,默认值为 `768`;
+    - `mfr_batch_size`:MFR 预测时使用的批大小;在 GPU 上运行时,建议将此值设置为大于 `1`;默认值为 `1`;
+    - `embed_sep`:嵌入 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为 `(' $', '$ ')`;
+    - `isolated_sep`:孤立 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为两个美元符号;
+    - `line_sep`:文本行之间的分隔符;仅在 `return_text` 为 `True` 时有效;默认值为换行符;
+    - `auto_line_break`:自动换行识别的文本;仅在 `return_text` 为 `True` 时有效;默认值为 `True`;
+    - `det_text_bbox_max_width_expand_ratio`:扩展检测文本框的宽度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.3`;
+    - `det_text_bbox_max_height_expand_ratio`:扩展检测文本框的高度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.2`;
+    - `embed_ratio_threshold`:嵌入公式和文本行之间的重叠阈值;默认值为 `0.6`。当嵌入公式和文本行之间的重叠大于或等于此阈值时,认为嵌入公式和文本行在同一行;否则,认为它们在不同行
+    - `table_as_image`:如果为 `True`,则将表格识别为图像(不将表格内容解析为文本);默认值为 `False`
+    - `title_contain_formula`:如果为 `True`,则将页面标题作为混合图像(文本和公式)进行识别。如果为 `False`,则将其作为文本图片进行识别(不识别公式);默认值为 `False`
+    - `text_contain_formula`:如果为 `True`,则将页面文本作为混合图像(文本和公式)进行识别。如果为 `False`,则将其作为文本进行识别(不识别公式);默认值为 `True`
+    - `formula_rec_kwargs`:传递给公式识别器 `latex_ocr` 的生成参数;默认值为 `{}`
+    - `save_debug_res`:如果设置了 `save_debug_res`,则把各种中间的解析结果存入此目录以便于调试;默认值为 `None`,表示不保存
+
+**返回值**:返回一个 `Page` 对象,可以使用 `page.to_markdown('output-dir')` 来获取识别结果的 markdown 输出。
+
+**调用示例**:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = 'examples/page2.png'
+p2t = Pix2Text.from_config()
+out_page = p2t.recognize_page(
+    img_fp,
+    title_contain_formula=False,
+    text_contain_formula=False,
+    save_debug_res='./output-debug',
+)
+out_page.to_markdown('output-page-md')
+```
+
+
+### 3. 函数 `.recognize_text_formula()`
+
+此函数用于识别一张包含文字和公式的图片(如段落截图)中的内容,如示例图片 [examples/mixed.jpg](examples/mixed.jpg)。
+函数定义如下:
+
+```python
+def recognize_text_formula(
+    self, img: Union[str, Path, Image.Image], return_text: bool = True, **kwargs,
+) -> Union[str, List[str], List[Any], List[List[Any]]]:
+    """
+    Analyze the layout of the image, and then recognize the information contained in each section. 
+ + Args: + img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()` + return_text (bool): Whether to return the recognized text; default value is `True` + kwargs (): + * resized_shape (int): Resize the image width to this size for processing; default value is `768` + * save_analysis_res (str): Save the mfd result image in this file; default is `None`, which means not to save + * mfr_batch_size (int): batch size for MFR; When running on GPU, this value is suggested to be set to greater than 1; default value is `1` + * embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')` + * isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is two-dollar signs + * line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break + * auto_line_break (bool): Automatically line break the recognized text; only effective when `return_text` is `True`; default value is `True` + * det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.3` + * det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.2` + * embed_ratio_threshold (float): The overlap threshold for embed formulas and text lines; default value is `0.6`. + When the overlap between an embed formula and a text line is greater than or equal to this threshold, + the embed formula and the text line are considered to be on the same line; + otherwise, they are considered to be on different lines. 
+            * table_as_image (bool): If `True`, the table will be recognized as an image; default value is `False`
+            * formula_rec_kwargs (dict): generation arguments passed to formula recognizer `latex_ocr`; default value is `{}`
+
+    Returns: a str when `return_text` is `True`; or a list of ordered (top to bottom, left to right) dicts when `return_text` is `False`,
+        with each dict representing one detected box, containing keys:
+
+        * `type`: The category of the image; Optional: 'text', 'isolated', 'embedding'
+        * `text`: The recognized text or Latex formula
+        * `score`: The confidence score [0, 1]; the higher, the more confident
+        * `position`: Position information of the block, `np.ndarray`, with shape of [4, 2]
+        * `line_number`: The line number of the box (first line `line_number==0`), boxes with the same value indicate they are on the same line
+
+    """
+```
+
+**函数说明**:
+
+* 输入参数 `img`:图片路径或者 `Image.Image` 对象;
+* 输入参数 `return_text`:是否返回纯文本;取值为 `False` 时返回带有结构化信息的 list;默认值为 `True`;
+* 输入参数 `kwargs`:其他参数,具体说明如下:
+  - `resized_shape`:调整图片的宽度为此大小以进行处理,默认值为 `768`;
+  - `save_analysis_res`:保存 MFD 解析结果图像的文件名;默认值为 `None`,表示不保存;
+  - `mfr_batch_size`:MFR 预测时使用的批大小;在 GPU 上运行时,建议将此值设置为大于 `1`;默认值为 `1`;
+  - `embed_sep`:嵌入 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为 `(' $', '$ ')`;
+  - `isolated_sep`:孤立 LaTeX 的前缀和后缀;仅在 `return_text` 为 `True` 时有效;默认值为两个美元符号;
+  - `line_sep`:文本行之间的分隔符;仅在 `return_text` 为 `True` 时有效;默认值为换行符;
+  - `auto_line_break`:自动对识别出的文本进行断行;仅在 `return_text` 为 `True` 时有效;默认值为 `True`;
+  - `det_text_bbox_max_width_expand_ratio`:扩展检测文本框的宽度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.3`;
+  - `det_text_bbox_max_height_expand_ratio`:扩展检测文本框的高度。此值表示相对于原始框高度的最大扩展比率;默认值为 `0.2`;
+  - `embed_ratio_threshold`:嵌入公式和文本行之间的重叠阈值;默认值为 `0.6`。当嵌入公式和文本行之间的重叠大于或等于此阈值时,认为嵌入公式和文本行在同一行;否则,认为它们在不同行;
+  - `table_as_image`:如果为 `True`,则将表格识别为图像;默认值为 `False`;
+  - `formula_rec_kwargs`:传递给公式识别器 `latex_ocr` 的生成参数;默认值为 `{}`。
+
+**返回值**:当 `return_text` 为 `True` 时,返回一个字符串;当 `return_text` 为 `False` 时,返回一个有序的(从上到下,从左到右)字典列表,每个字典表示一个检测框,包含以下键值:
+  - `type`:图像的类别;可选值:'text'、'isolated'、'embedding'
+  - `text`:识别的文本或 LaTeX 公式
+  - `score`:置信度分数 [0, 1];分数越高,置信度越高
+  - `position`:块的位置信息,`np.ndarray`,形状为 `[4, 2]`
+  - `line_number`:框的行号(第一行 `line_number==0`),具有相同值的框表示它们在同一行
+
+**调用示例**:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = 'examples/mixed.jpg'
+p2t = Pix2Text.from_config()
+out = p2t.recognize_text_formula(
+    img_fp,
+    save_analysis_res='./output-debug',
+)
+```
+
+### 4. 函数 `.recognize_formula()`
+
+此函数用于识别一张纯公式的图片中的内容,如示例图片 [examples/formula2.png](examples/formula2.png)。
+函数定义如下:
+
+```python
+def recognize_formula(
+    self,
+    imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+    batch_size: int = 1,
+    return_text: bool = True,
+    rec_config: Optional[dict] = None,
+    **kwargs,
+) -> Union[str, List[str], Dict[str, Any], List[Dict[str, Any]]]:
+    """
+    Recognize pure Math Formula images to LaTeX Expressions.
+    Args:
+        imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+        batch_size (int): The batch size
+        return_text (bool): Whether to return only the recognized text; default value is `True`
+        rec_config (Optional[dict]): The config for recognition
+        **kwargs (): Special model parameters. Not used for now.
+
+    Returns: The LaTeX Expression or list of LaTeX Expressions;
+        str or List[str] when `return_text` is True;
+        Dict[str, Any] or List[Dict[str, Any]] when `return_text` is False, with the following keys:
+
+        * `text`: The recognized LaTeX text
+        * `score`: The confidence score [0, 1]; the higher, the more confident
+
+    """
+```
+
+**函数说明**:
+
+* 输入参数 `imgs`:图片路径或 `Image.Image` 对象,或者由它们组成的列表;
+* 输入参数 `batch_size`:批大小,默认值为 `1`;
+* 输入参数 `return_text`:是否返回纯文本;取值为 `False` 时返回带有结构化信息的结果;默认值为 `True`;
+* 输入参数 `rec_config`:识别所用的配置,可选参数;
+* 输入参数 `kwargs`:其他参数,目前未使用。
+
+**返回值**:当 `return_text` 为 `True` 时,返回 LaTeX 表达式字符串(输入为图片列表时,返回对应的字符串列表);当 `return_text` 为 `False` 时,返回一个字典(输入为图片列表时,返回对应的字典列表),包含以下键值:
+  - `text`:识别的 LaTeX 文本
+  - `score`:置信度分数 [0, 1];分数越高,置信度越高
+
+**调用示例**:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = 'examples/formula2.png'
+p2t = Pix2Text.from_config()
+out = p2t.recognize_formula(img_fp)
+```
+
+### 5. 函数 `.recognize_text()`
+
+此函数用于识别一张纯文字的图片中的内容,如示例图片 [examples/general.jpg](examples/general.jpg)。
+函数定义如下:
+
+```python
+def recognize_text(
+    self,
+    imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+    return_text: bool = True,
+    rec_config: Optional[dict] = None,
+    **kwargs,
+) -> Union[str, List[str], List[Any], List[List[Any]]]:
+    """
+    Recognize a pure Text Image.
+    Args:
+        imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+        return_text (bool): Whether to return only the recognized text; default value is `True`
+        rec_config (Optional[dict]): The config for recognition
+        kwargs (): Other parameters for `text_ocr.ocr()`
+
+    Returns: Text str or list of text strs when `return_text` is True;
+        `List[Any]` or `List[List[Any]]` when `return_text` is False, with the same length as `imgs` and the following keys:
+
+        * `position`: Position information of the block, `np.ndarray`, with a shape of [4, 2]
+        * `text`: The recognized text
+        * `score`: The confidence score [0, 1]; the higher, the more confident
+
+    """
+```
+
+**函数说明**:
+
+* 输入参数 `imgs`:图片路径或 `Image.Image` 对象,或者由它们组成的列表;
+* 输入参数 `return_text`:是否返回纯文本;取值为 `False` 时返回带有结构化信息的结果;默认值为 `True`;
+* 输入参数 `rec_config`:识别所用的配置,可选参数;
+* 输入参数 `kwargs`:其他参数,具体说明参考函数 `text_ocr.ocr()`。
+
+**返回值**:当 `return_text` 为 `True` 时,返回一个字符串(输入为图片列表时,返回对应的字符串列表);当 `return_text` 为 `False` 时,返回与输入等长的结构化结果:每张图片对应一个有序的(从上到下,从左到右)字典列表,每个字典表示一个检测框,包含以下键值:
+  - `position`:块的位置信息,`np.ndarray`,形状为 `[4, 2]`
+  - `text`:识别的文本
+  - `score`:置信度分数 [0, 1];分数越高,置信度越高
+
+**调用示例**:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = 'examples/general.jpg'
+p2t = Pix2Text.from_config()
+out = p2t.recognize_text(img_fp)
+```
+
+### 6. 函数 `.recognize()`
+
+是不是觉得上面的接口太丰富了,使用起来有点麻烦?没关系,这个函数可以根据指定的文件类型,自动调用上面对应的函数进行识别。
+
+```python
+def recognize(
+    self,
+    img: Union[str, Path, Image.Image],
+    file_type: Literal[
+        'pdf', 'page', 'text_formula', 'formula', 'text'
+    ] = 'text_formula',
+    **kwargs,
+) -> Union[Document, Page, str, List[str], List[Any], List[List[Any]]]:
+    """
+    Recognize the content of the image or pdf file according to the specified type.
+    It will call the corresponding recognition function `.recognize_{file_type}()` according to the `file_type`.
+
+    Args:
+        img (Union[str, Path, Image.Image]): The image/pdf file path or `Image.Image` object
+        file_type (str): Supported file types: 'pdf', 'page', 'text_formula', 'formula', 'text'
+        **kwargs (dict): Arguments for the corresponding recognition function
+
+    Returns: recognized results
+
+    """
+```
+
+**函数说明**:
+
+* 输入参数 `img`:图片/PDF 文件路径或者 `Image.Image` 对象;
+* 输入参数 `file_type`:文件类型,可选值为 `'pdf'`, `'page'`, `'text_formula'`, `'formula'`, `'text'`;
+* 输入参数 `kwargs`:其他参数,具体说明参考上面的函数。
+
+**返回值**:根据 `file_type` 的不同,返回不同的结果。具体说明参考上面的函数。
+
+**调用示例**:
+
+```python
+from pix2text import Pix2Text
+
+img_fp = 'examples/general.jpg'
+p2t = Pix2Text.from_config()
+out = p2t.recognize(img_fp, file_type='text')  # 等价于 p2t.recognize_text(img_fp)
+```
+
+
+更多使用示例请参见 [tests/test_pix2text.py](https://github.com/breezedeus/Pix2Text/blob/main/tests/test_pix2text.py)。
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b80823305aadea54705001eaeedbb393ea705463
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,119 @@
+# Project information
+site_name: Pix2Text
+site_url: https://pix2text.readthedocs.io
+site_description: Pix2Text Online Documents
+site_author: Breezedeus
+
+# Repository
+repo_url: https://github.com/breezedeus/pix2text
+repo_name: Breezedeus/Pix2Text
+edit_uri: ""  # disables edit button
+
+# Copyright
+copyright: Copyright © 2022 - 2024
+
+# Social media
+extra:
+  social:
+    - icon: fontawesome/brands/github
+      link: https://github.com/breezedeus
+    - icon: fontawesome/brands/zhihu
+      link: https://www.zhihu.com/people/breezedeus-50
+    - icon: fontawesome/brands/youtube
+      link: https://www.youtube.com/@breezedeus
+    - icon: fontawesome/brands/youtube
+      link: https://space.bilibili.com/509307267
+    - icon: fontawesome/brands/twitter
+      link: https://twitter.com/breezedeus
+
+# Configuration
+theme:
+  name: material
+#  name: readthedocs
+  logo: figs/breezedeus.png
+  favicon: figs/breezedeus.ico
+  palette:
+    primary: indigo
+    accent: indigo
+  font:
+    text: Roboto
+    code: Roboto Mono
+  features:
+    - navigation.tabs
+    - navigation.expand
+  icon:
+    repo: fontawesome/brands/github
+
+# Extensions
+markdown_extensions:
+  - meta
+  - pymdownx.emoji:
+      emoji_index: !!python/name:materialx.emoji.twemoji
+      emoji_generator: !!python/name:materialx.emoji.to_svg
+  - admonition  # alerts
+  - pymdownx.details  # collapsible alerts
+  - pymdownx.superfences  # nest code and content inside alerts
+  - attr_list  # add HTML and CSS to Markdown elements
+  - md_in_html
+  - pymdownx.inlinehilite  # inline code highlights
+  - pymdownx.keys  # show keystroke symbols
+  - pymdownx.snippets  # insert content from other files
+  - pymdownx.tabbed  # content tabs
+  - footnotes
+  - def_list
+  - pymdownx.arithmatex:  # mathjax
+      generic: true
+  - pymdownx.tasklist:
+      custom_checkbox: true
+      clickable_checkbox: false
+  - codehilite
+  - pymdownx.highlight:
+      use_pygments: true
+  - toc:
+      toc_depth: 4
+
+# Plugins
+plugins:
+  - search
+  - macros
+  - mkdocstrings:
+      default_handler: python
+      handlers:
+        python:
+          rendering:
+            show_root_heading: false
+            show_source: true
+            show_category_heading: true
+      watch:
+        - pix2text
+
+# Extra CSS
+extra_css:
+  - static/css/custom.css
+
+# Extra JS
+extra_javascript:
+  - https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js
+  - https://polyfill.io/v3/polyfill.min.js?features=es6
+  - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
+
+# Page tree
+nav:
+  - 🏠 Home: index.md
+  - 🛠️ Install: install.md
+  - 🛀🏻 Demo: demo.md
+  - 🧳 Models: models.md
+  - 📚 Examples: examples.md
+  - 📖 Usage: usage.md
+  - 🎮 APIs:
+      - Pix2Text: pix2text/pix_to_text.md
+      - TextFormulaOCR: pix2text/text_formula_ocr.md
+      - LatexOCR: pix2text/latex_ocr.md
+      - TableOCR: pix2text/table_ocr.md
+  - 💬 Contact: contact.md
+  - 🎛️ More:
+      - 🏄🏻 Command Tools: command.md
+      - 🕹 Model Training: train.md
+      - 🗒 RELEASE Notes: RELEASE.md
+      - 🙋🏽 FAQ: faq.md
+      - 🥤 Buy Me Coffee: buymeacoffee.md
diff --git a/pix2text/__init__.py b/pix2text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dbb12981bd6ff51970f268eaef98613f1ad3ea9
--- /dev/null
+++ b/pix2text/__init__.py
@@ -0,0 +1,14 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
+
+from .utils import read_img, set_logger, merge_line_texts
+from .render import render_html
+from .doc_xl_layout import DocXLayoutParser
+# from .layoutlmv3 import LayoutLMv3LayoutParser
+# from .doc_yolo_layout_parser import DocYoloLayoutParser
+from .latex_ocr import LatexOCR
+from .formula_detector import MathFormulaDetector
+from .text_formula_ocr import TextFormulaOCR
+from .table_ocr import TableOCR
+from .pix_to_text import Pix2Text
diff --git a/pix2text/__version__.py b/pix2text/__version__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ba119332568710e088d3df9d70736e769ca7c40
--- /dev/null
+++ b/pix2text/__version__.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2025, [Breezedeus](https://www.breezedeus.com).
+
+__version__ = '1.1.4'
diff --git a/pix2text/app.py b/pix2text/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..cffd2ff30f15a713e1e248384da4ad893617b302
--- /dev/null
+++ b/pix2text/app.py
@@ -0,0 +1,61 @@
+# coding: utf-8
+# Copyright (C) 2022, [Breezedeus](https://github.com/breezedeus).
+
+from PIL import Image
+import streamlit as st
+
+from pix2text import set_logger, Pix2Text
+
+logger = set_logger()
+st.set_page_config(layout="wide")
+
+
+@st.cache(allow_output_mutation=True)
+def get_model():
+    return Pix2Text()
+
+
+def main():
+    p2t = get_model()
+
+    title = '开源工具 Pix2Text Demo'
+    # NOTE: the inline HTML originally embedded in the two markdown strings
+    # below was lost during extraction; the tags used here (a centered heading
+    # and a centered line) are a minimal reconstruction.
+    st.markdown(f"<h1 style='text-align: center;'>{title}</h1>", unsafe_allow_html=True)
+
+    subtitle = '作者:breezedeus; ' \
+               '欢迎加入 交流群'
+    st.markdown(f"<div style='text-align: center;'>{subtitle}</div>", unsafe_allow_html=True)

", unsafe_allow_html=True) + st.markdown('') + st.subheader('选择待识别图片') + content_file = st.file_uploader('', type=["png", "jpg", "jpeg", "webp"]) + if content_file is None: + st.stop() + + try: + img = Image.open(content_file).convert('RGB') + img.save('ori.jpg') + + out = p2t(img) + logger.info(out) + st.markdown('##### 原始图片:') + cols = st.columns([1, 3, 1]) + with cols[1]: + st.image(content_file) + + st.subheader('识别结果:') + st.markdown(f"* **图片类型**:{out['image_type']}") + st.markdown("* **识别内容**:") + + cols = st.columns([1, 3, 1]) + with cols[1]: + st.text(out['text']) + + if out['image_type'] == 'formula': + st.markdown(f"$${out['text']}$$") + + except Exception as e: + st.error(e) + + +if __name__ == '__main__': + main() diff --git a/pix2text/cli.py b/pix2text/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..227f52ed3380de65c767eab6b05cc45005e6beb2 --- /dev/null +++ b/pix2text/cli.py @@ -0,0 +1,751 @@ +# coding: utf-8 +# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix. +# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com). + +import os +import logging +import glob +import json +from multiprocessing import Process +from pprint import pformat + +import click + +from pix2text import set_logger, Pix2Text + +_CONTEXT_SETTINGS = {"help_option_names": ['-h', '--help']} +logger = set_logger(log_level=logging.INFO) + + +@click.group(context_settings=_CONTEXT_SETTINGS) +def cli(): + pass + + +@cli.command('predict') +@click.option( + "-l", + "--languages", + type=str, + default='en,ch_sim', + help="Language Codes for Text-OCR to recognize, separated by commas", + show_default=True, +) +@click.option( + "--layout-config", + type=str, + default=None, + help="Configuration information for the layout parser model, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--mfd-config", + type=str, + default=None, + help="Configuration information for the MFD model, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--formula-ocr-config", + type=str, + default=None, + help="Configuration information for the Latex-OCR mathematical formula recognition model. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--text-ocr-config", + type=str, + default=None, + help="Configuration information for Text-OCR recognition, in JSON string format. 
Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--enable-formula/--disable-formula", + default=True, + help="Whether to enable formula recognition", + show_default=True, +) +@click.option( + "--enable-table/--disable-table", + default=True, + help="Whether to enable table recognition", + show_default=True, +) +@click.option( + "-d", + "--device", + help="Choose to run the code using `cpu`, `gpu`, or a specific GPU like `cuda:0`", + type=str, + default='cpu', + show_default=True, +) +@click.option( + "--file-type", + type=click.Choice(['pdf', 'page', 'text_formula', 'formula', 'text']), + default='text_formula', + help="Which file type to process, 'pdf', 'page', 'text_formula', 'formula', or 'text'", + show_default=True, +) +@click.option( + "--resized-shape", + help="Resize the image width to this size before processing", + type=int, + default=768, + show_default=True, +) +@click.option( + "-i", + "--img-file-or-dir", + required=True, + help="File path of the input image/pdf or the specified directory", +) +@click.option( + "--save-debug-res", + default=None, + help="If `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save", + show_default=True, +) +@click.option( + "--rec-kwargs", + type=str, + default=None, + help="kwargs for calling .recognize(), in JSON string format", + show_default=True, +) +@click.option( + "--return-text/--no-return-text", + default=True, + help="Whether to return only the text result", + show_default=True, +) +@click.option( + "--auto-line-break/--no-auto-line-break", + default=True, + help="Whether to automatically determine to merge adjacent line results into a single line result", + show_default=True, +) +@click.option( + "-o", + "--output-dir", + default='output-md', + help="Output directory for the recognized text results. 
Only effective when `file-type` is `pdf` or `page`", + show_default=True, +) +@click.option( + "--log-level", + default='INFO', + help="Log Level, such as `INFO`, `DEBUG`", + show_default=True, +) +def predict( + languages, + layout_config, + mfd_config, + formula_ocr_config, + text_ocr_config, + enable_formula, + enable_table, + device, + file_type, + resized_shape, + img_file_or_dir, + save_debug_res, + rec_kwargs, + return_text, + auto_line_break, + output_dir, + log_level, +): + """Use Pix2Text (P2T) to predict the text information in an image or PDF.""" + logger = set_logger(log_level=log_level) + + mfd_config = json.loads(mfd_config) if mfd_config else {} + formula_ocr_config = json.loads(formula_ocr_config) if formula_ocr_config else {} + languages = [lang.strip() for lang in languages.split(',') if lang.strip()] + text_ocr_config = json.loads(text_ocr_config) if text_ocr_config else {} + + layout_config = json.loads(layout_config) if layout_config else {} + text_formula_config = { + 'languages': languages, # 'en,ch_sim + 'mfd': mfd_config, + 'formula': formula_ocr_config, + 'text': text_ocr_config, + } + total_config = { + 'layout': layout_config, + 'text_formula': text_formula_config, + } + p2t = Pix2Text.from_config( + total_configs=total_config, + enable_formula=enable_formula, + enable_table=enable_table, + device=device, + ) + + fp_list = [] + if os.path.isfile(img_file_or_dir): + fp_list.append(img_file_or_dir) + if save_debug_res: + save_debug_res = [save_debug_res] + elif os.path.isdir(img_file_or_dir): + fn_list = glob.glob1(img_file_or_dir, '*g') + fp_list = [os.path.join(img_file_or_dir, fn) for fn in fn_list] + if save_debug_res: + os.makedirs(save_debug_res, exist_ok=True) + save_debug_res = [ + os.path.join(save_debug_res, 'output-debugs-' + fn) for fn in fn_list + ] + else: + raise ValueError(f'{img_file_or_dir} is not a valid file or directory') + + rec_kwargs = json.loads(rec_kwargs) if rec_kwargs else {} + rec_kwargs['resized_shape'] = resized_shape + rec_kwargs['return_text'] = return_text + rec_kwargs['auto_line_break'] = auto_line_break + + for idx, fp in enumerate(fp_list): + if file_type in ('pdf', 'page'): + rec_kwargs['save_debug_res'] = ( + save_debug_res[idx] if save_debug_res is not None else None + ) + else: + rec_kwargs['save_analysis_res'] = ( + save_debug_res[idx] if save_debug_res is not None else None + ) + out = p2t.recognize(fp, file_type=file_type, **rec_kwargs) + if file_type in ('pdf', 'page'): + out = out.to_markdown(output_dir) + logger.info( + f'In image: {fp}\nOuts: \n{out if isinstance(out, str) else pformat(out)}\n' + ) + + +@cli.command('evaluate') +@click.option( + "-l", + "--languages", + type=str, + default='en,ch_sim', + help="Language Codes for Text-OCR to recognize, separated by commas", + show_default=True, +) +@click.option( + "--layout-config", + type=str, + default=None, + help="Configuration information for the layout parser model, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--mfd-config", + type=str, + default=None, + help="Configuration information for the MFD model, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--formula-ocr-config", + type=str, + default=None, + help="Configuration information for the Latex-OCR mathematical formula recognition model. 
Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--text-ocr-config", + type=str, + default=None, + help="Configuration information for Text-OCR recognition, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--enable-formula/--disable-formula", + default=True, + help="Whether to enable formula recognition", + show_default=True, +) +@click.option( + "--enable-table/--disable-table", + default=True, + help="Whether to enable table recognition", + show_default=True, +) +@click.option( + "-d", + "--device", + help="Choose to run the code using `cpu`, `gpu`, or a specific GPU like `cuda:0`", + type=str, + default='cpu', + show_default=True, +) +@click.option( + "--file-type", + type=click.Choice(['pdf', 'page', 'text_formula', 'formula', 'text']), + default='text_formula', + help="Which file type to process, 'pdf', 'page', 'text_formula', 'formula', or 'text'", + show_default=True, +) +@click.option( + "--resized-shape", + help="Resize the image width to this size before processing", + type=int, + default=768, + show_default=True, +) +@click.option( + "-i", + "--input-json", + required=True, + help="JSON file containing evaluation data with image paths and ground truth", +) +@click.option( + "--gt-key", + default="model_result", + help="Key name for ground truth text in the JSON data", + show_default=True, +) +@click.option( + "--prefix-img-dir", + default="data", + help="Root directory for image files, will be prepended to img_path in JSON", + show_default=True, +) +@click.option( + "--rec-kwargs", + type=str, + default=None, + help="kwargs for calling .recognize(), in JSON string format", + show_default=True, +) +@click.option( + "--auto-line-break/--no-auto-line-break", + default=True, + help="Whether to automatically determine to merge adjacent line results into a single line result", + show_default=True, +) +@click.option( + "-o", + "--output-json", + default='evaluation_results.json', + help="Output JSON file for evaluation results", + show_default=True, +) +@click.option( + "--output-excel", + default=None, + help="Output Excel file with embedded images (optional)", + show_default=True, +) +@click.option( + "--output-html", + default=None, + help="Output HTML report with embedded images (optional)", + show_default=True, +) +@click.option( + "--max-img-width", + default=400, + help="Maximum width for embedded images in pixels", + show_default=True, +) +@click.option( + "--max-img-height", + default=300, + help="Maximum height for embedded images in pixels", + show_default=True, +) +@click.option( + "--max-samples", + default=-1, + help="Maximum number of samples to process (-1 for all samples)", + show_default=True, +) +@click.option( + "--log-level", + default='INFO', + help="Log Level, such as `INFO`, `DEBUG`", + show_default=True, +) +def evaluate( + languages, + layout_config, + mfd_config, + formula_ocr_config, + text_ocr_config, + enable_formula, + enable_table, + device, + file_type, + resized_shape, + input_json, + gt_key, + prefix_img_dir, + rec_kwargs, + auto_line_break, + output_json, + output_excel, + output_html, + max_img_width, + max_img_height, + max_samples, + log_level, +): + """Evaluate Pix2Text (P2T) performance using a JSON file with image paths and ground truth.""" + from pix2text.utils import ( + calculate_cer_batch, + calculate_cer, + save_evaluation_results_to_excel_with_images, + create_html_report_with_images + ) + + logger = 
set_logger(log_level=log_level)
+
+    # Load evaluation data
+    try:
+        with open(input_json, 'r', encoding='utf-8') as f:
+            eval_data = json.load(f)
+    except Exception as e:
+        logger.error(f"Failed to load evaluation data from {input_json}: {e}")
+        return
+
+    if not isinstance(eval_data, list):
+        logger.error("Evaluation data must be a list of dictionaries")
+        return
+
+    # Validate data format
+    for i, item in enumerate(eval_data):
+        if not isinstance(item, dict):
+            logger.error(f"Item {i} is not a dictionary")
+            return
+        if 'img_path' not in item or gt_key not in item:
+            logger.error(f"Item {i} missing required keys 'img_path' or '{gt_key}'")
+            return
+
+    # Initialize Pix2Text
+    mfd_config = json.loads(mfd_config) if mfd_config else {}
+    formula_ocr_config = json.loads(formula_ocr_config) if formula_ocr_config else {}
+    languages = [lang.strip() for lang in languages.split(',') if lang.strip()]
+    text_ocr_config = json.loads(text_ocr_config) if text_ocr_config else {}
+
+    layout_config = json.loads(layout_config) if layout_config else {}
+    text_formula_config = {
+        'languages': languages,
+        'mfd': mfd_config,
+        'formula': formula_ocr_config,
+        'text': text_ocr_config,
+    }
+    total_config = {
+        'layout': layout_config,
+        'text_formula': text_formula_config,
+    }
+    p2t = Pix2Text.from_config(
+        total_configs=total_config,
+        enable_formula=enable_formula,
+        enable_table=enable_table,
+        device=device,
+    )
+
+    # Prepare recognition kwargs
+    rec_kwargs = json.loads(rec_kwargs) if rec_kwargs else {}
+    rec_kwargs['resized_shape'] = resized_shape
+    rec_kwargs['return_text'] = True
+    rec_kwargs['auto_line_break'] = auto_line_break
+
+    def filter_and_clean_gt(gt):
+        # Only evaluate formula-only samples;
+        # strip the surrounding '$$' from the ground truth
+        if not gt:
+            return False, gt
+        if gt.startswith(r'$$') and gt.endswith(r'$$'):
+            gt = gt[2:-2]
+        if '$$' not in gt:
+            return True, gt.strip()
+        return False, gt
+
+    # Process each image and collect results
+    predictions = []
+    ground_truths = []
+    results = []
+
+    # Apply max_samples limit: shuffle first so the evaluated subset is a random sample
+    if max_samples > 0:
+        import random
+        random.seed(42)
+        random.shuffle(eval_data)
+
+        logger.info(f"Limited to {max_samples} samples for evaluation")
+    logger.info(f"Starting evaluation on {len(eval_data)} images...")
+
+    for i, item in enumerate(eval_data):
+        # max_samples == -1 means "evaluate everything"
+        if 0 < max_samples <= len(results):
+            break
+        img_path = item['img_path']
+        ground_truth = item[gt_key]
+
+        # Handle ground truth that might be a JSON string
+        if isinstance(ground_truth, str):
+            try:
+                ground_truth = json.loads(ground_truth)
+            except json.JSONDecodeError:
+                # If it's not valid JSON, use as is
+                pass
+
+        # Apply formula filtering if needed
+        is_formula, ground_truth = filter_and_clean_gt(ground_truth)
+        if not is_formula:
+            continue
+
+        # Prepend prefix_img_dir to img_path if it's not an absolute path
+        if not os.path.isabs(img_path):
+            img_path = os.path.join(prefix_img_dir, img_path)
+
+        logger.info(f"Processing image {i+1}/{len(eval_data)}: {img_path}")
+
+        try:
+            # Check if image file exists
+            if not os.path.exists(img_path):
+                logger.warning(f"Image file not found: {img_path}")
+                continue
+
+            # Recognize text
+            prediction = p2t.recognize(img_path, file_type=file_type, **rec_kwargs)
+
+            # Convert to string if needed
+            if not isinstance(prediction, str):
+                if hasattr(prediction, 'to_markdown'):
+                    prediction = prediction.to_markdown()
+                else:
+                    prediction = str(prediction)
+
+            predictions.append(prediction)
+            ground_truths.append(ground_truth)
+
+            # Calculate individual CER
+            cer = calculate_cer(prediction, ground_truth)
+
+            result = {
+                'img_path':
img_path, + 'ground_truth': ground_truth, + 'prediction': prediction, + 'cer': cer + } + results.append(result) + + logger.info(f"Image {img_path} CER: {cer:.4f}") + + except Exception as e: + logger.error(f"Error processing image {img_path}: {e}") + continue + + # resort results by cer + # results.sort(key=lambda x: x['cer'], reverse=True) + + # Calculate overall CER + if predictions and ground_truths: + cer_stats = calculate_cer_batch(predictions, ground_truths) + + # Prepare final results + evaluation_results = { + 'summary': { + 'total_samples': len(results), + 'average_cer': cer_stats['average_cer'], + 'individual_cers': cer_stats['individual_cers'] + }, + 'detailed_results': results + } + + # Save results + try: + with open(output_json, 'w', encoding='utf-8') as f: + json.dump(evaluation_results, f, ensure_ascii=False, indent=2) + logger.info(f"Evaluation results saved to: {output_json}") + except Exception as e: + logger.error(f"Failed to save evaluation results: {e}") + + # Print summary + logger.info("=" * 50) + logger.info("EVALUATION SUMMARY") + logger.info("=" * 50) + logger.info(f"Total samples processed: {len(results)}") + logger.info(f"Average CER: {cer_stats['average_cer']:.4f}") + logger.info(f"Best CER: {min(cer_stats['individual_cers']):.4f}") + logger.info(f"Worst CER: {max(cer_stats['individual_cers']):.4f}") + logger.info("=" * 50) + + else: + logger.error("No valid predictions generated") + + # Save results to Excel with embedded images (if requested) + if output_excel and results: + excel_success = save_evaluation_results_to_excel_with_images( + results=results, + output_file=output_excel, + img_path_key='img_path', + gt_key='ground_truth', + pred_key='prediction', + cer_key='cer', + max_img_width=max_img_width, + max_img_height=max_img_height + ) + if excel_success: + logger.info(f"Excel file with embedded images saved to: {output_excel}") + else: + logger.warning("Failed to save Excel file with embedded images") + + # Save results to HTML report with embedded images (if requested) + if output_html and results: + html_success = create_html_report_with_images( + results=results, + output_file=output_html, + img_path_key='img_path', + gt_key='ground_truth', + pred_key='prediction', + cer_key='cer', + max_img_width=max_img_width, + max_img_height=max_img_height + ) + if html_success: + logger.info(f"HTML report with embedded images saved to: {output_html}") + else: + logger.warning("Failed to save HTML report with embedded images") + + +@cli.command('serve') +@click.option( + "-l", + "--languages", + type=str, + default='en,ch_sim', + help="Language Codes for Text-OCR to recognize, separated by commas", + show_default=True, +) +@click.option( + "--layout-config", + type=str, + default=None, + help="Configuration information for the layout parser model, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--mfd-config", + type=str, + default=None, + help="Configuration information for the MFD model, in JSON string format. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--formula-ocr-config", + type=str, + default=None, + help="Configuration information for the Latex-OCR mathematical formula recognition model. Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--text-ocr-config", + type=str, + default=None, + help="Configuration information for Text-OCR recognition, in JSON string format. 
Default: `None`, meaning using the default configuration", + show_default=True, +) +@click.option( + "--enable-formula/--disable-formula", + default=True, + help="Whether to enable formula recognition", + show_default=True, +) +@click.option( + "--enable-table/--disable-table", + default=True, + help="Whether to enable table recognition", + show_default=True, +) +@click.option( + "-d", + "--device", + help="Choose to run the code using `cpu`, `gpu`, or a specific GPU like `cuda:0`", + type=str, + default='cpu', + show_default=True, +) +@click.option( + "-o", + "--output-md-root-dir", + default='output-md-root', + help="Markdown output root directory for the recognized text results. Only effective when `file-type` is `pdf` or `page`", + show_default=True, +) +@click.option( + '-H', '--host', type=str, default='0.0.0.0', help='server host', show_default=True, +) +@click.option( + '-p', '--port', type=int, default=8503, help='server port', show_default=True, +) +@click.option( + '--reload', + is_flag=True, + help='whether to reload the server when the codes have been changed', + show_default=True, +) +@click.option( + "--log-level", + default='INFO', + help="Log Level, such as `INFO`, `DEBUG`", + show_default=True, +) +def serve( + languages, + layout_config, + mfd_config, + formula_ocr_config, + text_ocr_config, + enable_formula, + enable_table, + device, + output_md_root_dir, + host, + port, + reload, + log_level, +): + """Start the HTTP service.""" + from pix2text.serve import start_server + + logger = set_logger(log_level=log_level) + + analyzer_config = json.loads(mfd_config) if mfd_config else {} + formula_ocr_config = json.loads(formula_ocr_config) if formula_ocr_config else {} + languages = [lang.strip() for lang in languages.split(',') if lang.strip()] + text_ocr_config = json.loads(text_ocr_config) if text_ocr_config else {} + + layout_config = json.loads(layout_config) if layout_config else {} + text_formula_config = { + 'languages': languages, # 'en,ch_sim + 'mfd': analyzer_config, + 'formula': formula_ocr_config, + 'text': text_ocr_config, + } + total_config = { + 'layout': layout_config, + 'text_formula': text_formula_config, + } + p2t_config = dict( + total_configs=total_config, + enable_formula=enable_formula, + enable_table=enable_table, + device=device, + ) + api = Process( + target=start_server, + kwargs={ + 'p2t_config': p2t_config, + 'output_md_root_dir': output_md_root_dir, + 'host': host, + 'port': port, + 'reload': reload, + }, + ) + api.start() + api.join() + + +if __name__ == "__main__": + cli() diff --git a/pix2text/consts.py b/pix2text/consts.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff373278f71248b8787b2ff384d217c85688b5f --- /dev/null +++ b/pix2text/consts.py @@ -0,0 +1,196 @@ +# coding: utf-8 +# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix. +# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com). 
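+# This module collects model-related constants: MODEL_VERSION (derived from
+# `__version__` below), the download source, and the registry of free/paid
+# model files used when fetching model weights.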
+import os +import logging +from collections import OrderedDict +from copy import copy, deepcopy +from typing import Set, Tuple, Dict, Any, Optional + +from .__version__ import __version__ + +logger = logging.getLogger(__name__) + +# 模型版本只对应到第二层,第三层的改动表示模型兼容。 +# 如: __version__ = '1.0.*',对应的 MODEL_VERSION 都是 '1.0' +MODEL_VERSION = '.'.join(__version__.split('.', maxsplit=2)[:2]) +DOWNLOAD_SOURCE = os.environ.get('PIX2TEXT_DOWNLOAD_SOURCE', 'HF') + +CN_OSS_ENDPOINT = ( + "https://sg-models.oss-cn-beijing.aliyuncs.com/pix2text/%s/" % MODEL_VERSION +) + + +def format_model_info(info: dict) -> dict: + out_dict = copy(info) + out_dict['cn_oss'] = CN_OSS_ENDPOINT + return out_dict + + +class AvailableModels(object): + P2T_SPACE = '__pix2text__' + + FREE_MODELS = OrderedDict( + { + ('mfr', 'onnx'): { + 'filename': 'p2t-mfr-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr', + 'local_model_id': 'mfr-onnx', + }, + ('mfd', 'onnx'): { + 'filename': 'p2t-mfd-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd', + 'local_model_id': 'mfd-onnx', + }, + ('mfd-1.5', 'onnx'): { + # 'filename': 'p2t-mfd-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-1.5', + 'local_model_id': 'mfd-1.5-onnx', + }, + ('mfr-1.5', 'onnx'): { + # 'filename': 'p2t-mfr-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-1.5', + 'local_model_id': 'mfr-1.5-onnx', + }, + } + ) + + PAID_MODELS = OrderedDict( + { + ('mfr', 'pytorch'): { + 'filename': 'p2t-mfr-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-pytorch', + 'local_model_id': 'mfr-pytorch', + }, + ('mfr-pro', 'onnx'): { + 'filename': 'p2t-mfr-pro-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-pro', + 'local_model_id': 'mfr-pro-onnx', + }, + ('mfr-pro', 'pytorch'): { + 'filename': 'p2t-mfr-pro-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-pro-pytorch', + 'local_model_id': 'mfr-pro-pytorch', + }, + ('mfr-plus', 'onnx'): { + 'filename': 'p2t-mfr-plus-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-plus', + 'local_model_id': 'mfr-plus-onnx', + }, + ('mfr-plus', 'pytorch'): { + 'filename': 'p2t-mfr-plus-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-plus-pytorch', + 'local_model_id': 'mfr-plus-pytorch', + }, + ('mfd', 'pytorch'): { + 'filename': 'p2t-mfd-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-pytorch', + 'local_model_id': 'mfd-pytorch', + }, + ('mfd-advanced', 'onnx'): { + 'filename': 'p2t-mfd-advanced-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-advanced', + 'local_model_id': 'mfd-advanced-onnx', + }, + ('mfd-advanced', 'pytorch'): { + 'filename': 'p2t-mfd-advanced-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-advanced-pytorch', + 'local_model_id': 'mfd-advanced-pytorch', + }, + ('mfd-pro', 'onnx'): { + 'filename': 'p2t-mfd-pro-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-pro', + 'local_model_id': 'mfd-pro-onnx', + }, + ('mfd-pro', 'pytorch'): { + 'filename': 'p2t-mfd-pro-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-pro-pytorch', + 'local_model_id': 'mfd-pro-pytorch', + }, + ('mfd-1.5', 'pytorch'): { + # 'filename': 'p2t-mfd-1.5-pytorch.zip', 
+ 'hf_model_id': 'breezedeus/pix2text-mfd-1.5-pytorch', + 'local_model_id': 'mfd-1.5-pytorch', + }, + ('mfd-advanced-1.5', 'onnx'): { + # 'filename': 'p2t-mfd-advanced-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-advanced-1.5', + 'local_model_id': 'mfd-advanced-1.5-onnx', + }, + ('mfd-advanced-1.5', 'pytorch'): { + # 'filename': 'p2t-mfd-advanced-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-advanced-1.5-pytorch', + 'local_model_id': 'mfd-advanced-1.5-pytorch', + }, + ('mfd-pro-1.5', 'onnx'): { + # 'filename': 'p2t-mfd-pro-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-pro-1.5', + 'local_model_id': 'mfd-pro-1.5-onnx', + }, + ('mfd-pro-1.5', 'pytorch'): { + # 'filename': 'p2t-mfd-pro-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfd-pro-1.5-pytorch', + 'local_model_id': 'mfd-pro-1.5-pytorch', + }, + ('mfr-1.5', 'pytorch'): { + # 'filename': 'p2t-mfr-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-1.5-pytorch', + 'local_model_id': 'mfr-1.5-pytorch', + }, + ('mfr-pro-1.5', 'onnx'): { + # 'filename': 'p2t-mfr-pro-onnx.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-pro-1.5', + 'local_model_id': 'mfr-pro-1.5-onnx', + }, + ('mfr-pro-1.5', 'pytorch'): { + # 'filename': 'p2t-mfr-pro-pytorch.zip', # download the file from CN OSS + 'hf_model_id': 'breezedeus/pix2text-mfr-pro-1.5-pytorch', + 'local_model_id': 'mfr-pro-1.5-pytorch', + }, + } + ) + + P2T_MODELS = deepcopy(FREE_MODELS) + P2T_MODELS.update(PAID_MODELS) + OUTER_MODELS = {} + + def all_models(self) -> Set[Tuple[str, str]]: + return set(self.P2T_MODELS.keys()) | set(self.OUTER_MODELS.keys()) + + def __contains__(self, model_name_backend: Tuple[str, str]) -> bool: + return model_name_backend in self.all_models() + + def register_models(self, model_dict: Dict[Tuple[str, str], Any], space: str): + assert not space.startswith('__') + for key, val in model_dict.items(): + if key in self.P2T_MODELS or key in self.OUTER_MODELS: + logger.warning( + 'model %s has already existed, and will be ignored' % key + ) + continue + val = deepcopy(val) + val['space'] = space + self.OUTER_MODELS[key] = val + + def get_space(self, model_name, model_backend) -> Optional[str]: + if (model_name, model_backend) in self.P2T_MODELS: + return self.P2T_SPACE + elif (model_name, model_backend) in self.OUTER_MODELS: + return self.OUTER_MODELS[(model_name, model_backend)]['space'] + return self.P2T_SPACE + + def get_info(self, model_name, model_backend) -> Optional[dict]: + if (model_name, model_backend) in self.P2T_MODELS: + info = self.P2T_MODELS[(model_name, model_backend)] + elif (model_name, model_backend) in self.OUTER_MODELS: + info = self.OUTER_MODELS[(model_name, model_backend)] + else: + logger.warning( + 'no url is found for model %s' % ((model_name, model_backend),) + ) + return None + info = format_model_info(info) + return info + + +AVAILABLE_MODELS = AvailableModels() diff --git a/pix2text/doc_xl_layout/__init__.py b/pix2text/doc_xl_layout/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbe308f471b4ef5f62164ff9d44559ad7091b3d --- /dev/null +++ b/pix2text/doc_xl_layout/__init__.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# This whole directory is adapted from https://github.com/AlibabaResearch/AdvancedLiterateMachinery. +# Thanks to the authors. 
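+# DocXLayoutParser (imported below) wraps the DocXLayout detector to split a
+# page image into layout elements such as titles, text blocks, tables,
+# figures, and formulas.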
+from .doc_xl_layout_parser import DocXLayoutParser \ No newline at end of file diff --git a/pix2text/doc_xl_layout/detectors/__init__.py b/pix2text/doc_xl_layout/detectors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pix2text/doc_xl_layout/detectors/base_detector_subfield.py b/pix2text/doc_xl_layout/detectors/base_detector_subfield.py new file mode 100644 index 0000000000000000000000000000000000000000..3263d72d504a40cb3c58a33e4a14b77ffd8f5f9b --- /dev/null +++ b/pix2text/doc_xl_layout/detectors/base_detector_subfield.py @@ -0,0 +1,206 @@ +# coding: utf-8 +import os +import time + +import cv2 +import numpy as np +import torch +from ..models.model import create_model, load_model + +# from ..utils.debugger import Debugger +from ..utils.image import get_affine_transform + + +class BaseDetector(object): + def __init__(self, opt): + # if opt.gpus[0] >= 0: + # opt.device = torch.device('cuda') + # else: + # opt.device = torch.device('cpu') + + self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt.convert_onnx, {}) + self.model = load_model(self.model, opt.load_model) + self.model = self.model.to(opt.device) + self.model.eval() + + self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3) + self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3) + self.max_per_image = opt.K + self.num_classes = opt.num_classes + self.scales = opt.test_scales + self.opt = opt + self.pause = True + + def pre_process(self, image, scale, meta=None): + height, width = image.shape[0:2] + new_height = int(height * scale) + new_width = int(width * scale) + if self.opt.fix_res: + inp_height, inp_width = self.opt.input_h, self.opt.input_w + c = np.array([new_width / 2., new_height / 2.], dtype=np.float32) + s = max(height, width) * 1.0 + else: + inp_height = (new_height | self.opt.pad) # + 1 + inp_width = (new_width | self.opt.pad) # + 1 + c = np.array([new_width // 2, new_height // 2], dtype=np.float32) + s = np.array([inp_width, inp_height], dtype=np.float32) + + trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) + resized_image = cv2.resize(image, (new_width, new_height)) + inp_image = cv2.warpAffine( + resized_image, trans_input, (inp_width, inp_height), + flags=cv2.INTER_LINEAR) + vis_image = inp_image + # import pdb; pdb.set_trace() + inp_image = ((inp_image / 255. 
- self.mean) / self.std).astype(np.float32) + + images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) + if self.opt.flip_test: + images = np.concatenate((images, images[:, :, :, ::-1]), axis=0) + images = torch.from_numpy(images) + meta = {'c': c, 's': s, + 'input_height': inp_height, + 'input_width': inp_width, + 'vis_image': vis_image, + 'out_height': inp_height // self.opt.down_ratio, + 'out_width': inp_width // self.opt.down_ratio} + return images, meta + + def resize(self, image): + h, w, _ = image.shape + scale = self.opt.input_h / (max(w, h) + 1e-4) + image = cv2.resize(image, (int(w * scale), int(h * scale))) + image = cv2.copyMakeBorder(image, 0, self.opt.input_h - int(h * scale), 0, self.opt.input_h - int(w * scale), + cv2.BORDER_CONSTANT, value=[0, 0, 0]) + return image, scale + + def process(self, images, return_time=False): + raise NotImplementedError + + def post_process(self, dets, meta, scale=1): + raise NotImplementedError + + def merge_outputs(self, detections): + raise NotImplementedError + + def debug(self, debugger, images, dets, output, scale=1): + raise NotImplementedError + + def show_results(self, debugger, image, results): + raise NotImplementedError + + def ps_convert_minmax(self, results): + detection = {} + for j in range(1, self.num_classes + 1): + detection[j] = [] + for j in range(1, self.num_classes + 1): + for bbox in results[j]: + if bbox[8] < self.opt.scores_thresh: + continue + minx = max(min(bbox[0], bbox[2], bbox[4], bbox[6]), 0) + miny = max(min(bbox[1], bbox[3], bbox[5], bbox[7]), 0) + maxx = max(bbox[0], bbox[2], bbox[4], bbox[6]) + maxy = max(bbox[1], bbox[3], bbox[5], bbox[7]) + detection[j].append([minx, miny, maxx, maxy, bbox[8], bbox[-1]]) + for j in range(1, self.num_classes + 1): + detection[j] = np.array(detection[j]) + return detection + + def Duplicate_removal(self, results): + bbox = [] + for box in results: + if box[8] > self.opt.scores_thresh: + # for i in range(8): + # if box[i] < 0: + # box[i] = 0 + # if box[i]>self.opt.input_h: + # box[i]=self.opt.input_h + bbox.append(box) + if len(bbox) > 0: + return np.array(bbox) + else: + return np.array([[0] * 12]) + + def run(self, image_or_path_or_tensor, meta=None): + load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0 + merge_time, tot_time = 0, 0 + # debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug == 3), num_classes=self.opt.num_classes, + # theme=self.opt.debugger_theme) + start_time = time.time() + pre_processed = False + if isinstance(image_or_path_or_tensor, np.ndarray): + image = image_or_path_or_tensor + elif type(image_or_path_or_tensor) == type(''): + image = cv2.imread(image_or_path_or_tensor) + else: + image = image_or_path_or_tensor['image'][0].numpy() + pre_processed_images = image_or_path_or_tensor + pre_processed = True + + loaded_time = time.time() + load_time += (loaded_time - start_time) + + detections = [] + for scale in self.scales: + scale_start_time = time.time() + if not pre_processed: + images, meta = self.pre_process(image, scale, meta) + else: + images = pre_processed_images['images'][scale][0] + meta = pre_processed_images['meta'][scale] + meta = {k: v.numpy()[0] for k, v in meta.items()} + + # import ipdb;ipdb.set_trace() + # images = np.load('data.npy').astype(np.float32) + # images = torch.from_numpy(images) + + images = images.to(self.opt.device) + # torch.cuda.synchronize() + pre_process_time = time.time() + pre_time += pre_process_time - scale_start_time + output, dets, dets_sub, corner, forward_time = 
self.process(images, return_time=True) + # torch.cuda.synchronize() + net_time += forward_time - pre_process_time + decode_time = time.time() + dec_time += decode_time - forward_time + + # if self.opt.debug >= 2: + # self.debug(debugger, images, dets, output, scale) + + dets, corner = self.post_process(dets, corner, meta, scale) + for j in range(1, self.num_classes + 1): + dets[j] = self.Duplicate_removal(dets[j]) + + # add sub + dets_sub, corner = self.post_process(dets_sub, corner, meta, scale) + for j in range(1, self.num_classes + 1): + dets_sub[j] = self.Duplicate_removal(dets_sub[j]) + + # import ipdb;ipdb.set_trace() + # torch.cuda.synchronize() + post_process_time = time.time() + post_time += post_process_time - decode_time + + dets[12] = dets_sub[12] + dets[13] = dets_sub[13] + + detections.append(dets) + + results = self.merge_outputs(detections) + # torch.cuda.synchronize() + end_time = time.time() + merge_time += end_time - post_process_time + tot_time += end_time - start_time + + # import pdb; pdb.set_trace() + if self.opt.debug >= 1: + if isinstance(image_or_path_or_tensor, str): + image_name = os.path.basename(image_or_path_or_tensor) + else: + print("--> warning: use demo.py for a better visualization") + image_name = "{}.jpg".format(time.time()) + # self.show_results(debugger, image, results, corner, image_name) + + return {'results': results, 'tot': tot_time, 'load': load_time, + 'pre': pre_time, 'net': net_time, 'dec': dec_time, 'corner': corner, + 'post': post_time, 'merge': merge_time, 'output': output} diff --git a/pix2text/doc_xl_layout/detectors/ctdet_subfield.py b/pix2text/doc_xl_layout/detectors/ctdet_subfield.py new file mode 100644 index 0000000000000000000000000000000000000000..aee3d617461adbb188b4dafd3f9ed5dcf8af7634 --- /dev/null +++ b/pix2text/doc_xl_layout/detectors/ctdet_subfield.py @@ -0,0 +1,225 @@ +# coding: utf-8 +import time +import numpy as np +import torch + +# from external.nms import soft_nms +from ..external.shapelyNMS import pnms +from ..models.decode import ctdet_4ps_decode, ctdet_cls_decode +from ..models.utils import flip_tensor +from ..utils.post_process import ctdet_4ps_post_process +from .base_detector_subfield import BaseDetector + + +class CtdetDetector_Subfield(BaseDetector): + def __init__(self, opt): + super(CtdetDetector_Subfield, self).__init__(opt) + + def process(self, images, return_time=False): + # import ipdb;ipdb.set_trace() + with torch.no_grad(): + output = self.model(images)[-1] + if self.opt.convert_onnx == 1: + # torch.cuda.synchronize() + inputs = ['data'] + outputs = [ + 'hm.0.sigmoid', + 'hm.0.maxpool', + 'cls.0.sigmoid', + 'ftype.0.sigmoid', + 'wh.2', + 'reg.2', + 'hm_sub.0.sigmoid', + 'hm_sub.0.maxpool', + 'wh_sub.2', + 'reg_sub.2', + ] + dynamic_axes = { + 'data': {2: 'h', 3: 'w'}, + 'hm.0.sigmoid': {2: 'H', 3: 'W'}, + 'hm.0.maxpool': {2: 'H', 3: 'W'}, + 'cls.0.sigmoid': {2: 'H', 3: 'W'}, + 'ftype.0.sigmoid': {2: 'H', 3: 'W'}, + 'wh.2': {2: 'H', 3: 'W'}, + 'reg.2': {2: 'H', 3: 'W'}, + 'hm_sub.0.sigmoid': {2: 'H', 3: 'W'}, + 'hm_sub.0.maxpool': {2: 'H', 3: 'W'}, + 'wh_sub.2': {2: 'H', 3: 'W'}, + 'reg_sub.2': {2: 'H', 3: 'W'}, + } + + onnx_path = self.opt.onnx_path + if self.opt.onnx_path == "auto": + onnx_path = "{}_{}cls_{}ftype.onnx".format( + self.opt.dataset, + self.opt.num_classes, + self.opt.num_secondary_classes, + ) + + torch.onnx.export( + self.model, + images, + onnx_path, + input_names=inputs, + output_names=outputs, + dynamic_axes=dynamic_axes, + do_constant_folding=True, + opset_version=10, + ) + 
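+                # With the dynamic_axes declared above, the exported ONNX graph
+                # keeps the marked spatial dimensions symbolic, so the same
+                # model file can serve inputs of varying height and width.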
print("--> info: onnx is saved at: {}".format(onnx_path)) + cls = output['cls_sigmoid'] + hm = output['hm_sigmoid'] + ftype = output['ftype_sigmoid'] + + # add sub + hm_sub = output['hm_sigmoid_sub'] + else: + hm = output['hm'].sigmoid_() + cls = output['cls'].sigmoid_() + ftype = output['ftype'].sigmoid_() + + # add sub + hm_sub = output['hm_sub'].sigmoid_() + + wh = output['wh'] + reg = output['reg'] if self.opt.reg_offset else None + + # add sub + wh_sub = output['wh_sub'] + reg_sub = output['reg_sub'] if self.opt.reg_offset else None + + if self.opt.flip_test: + hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2 + wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2 + reg = reg[0:1] if reg is not None else None + # torch.cuda.synchronize() + forward_time = time.time() + # return dets [bboxes, scores, clses] + # breakpoint() + dets, inds = ctdet_4ps_decode(hm, wh, reg=reg, K=self.opt.K) + + # add sub + dets_sub, inds_sub = ctdet_4ps_decode( + hm_sub, wh_sub, reg=reg_sub, K=self.opt.K + ) + + box_cls = ctdet_cls_decode(cls, inds) + box_ftype = ctdet_cls_decode(ftype, inds) + clses = torch.argmax(box_cls, dim=2, keepdim=True) + ftypes = torch.argmax(box_ftype, dim=2, keepdim=True) + dets = np.concatenate( + ( + dets.detach().cpu().numpy(), + clses.detach().cpu().numpy(), + ftypes.detach().cpu().numpy(), + ), + axis=2, + ) + dets = np.array(dets) + + # add subfield + dets_sub = np.concatenate( + ( + dets_sub.detach().cpu().numpy(), + clses.detach().cpu().numpy(), + ftypes.detach().cpu().numpy(), + ), + axis=2, + ) + dets_sub = np.array(dets_sub) + dets_sub[:, :, -3] += 11 + + corner = 0 + + if return_time: + return output, dets, dets_sub, corner, forward_time + else: + return output, dets, dets_sub + + def post_process(self, dets, corner, meta, scale=1): + if self.opt.nms: + detn = pnms(dets[0], self.opt.scores_thresh) + if detn.shape[0] > 0: + dets = detn.reshape(1, -1, detn.shape[1]) + k = dets.shape[2] if dets.shape[1] != 0 else 0 + if dets.shape[1] != 0: + dets = dets.reshape(1, -1, dets.shape[2]) + # return dets is list and what in dets is dict. 
key of dict is classes, value of dict is [bbox,score] + dets = ctdet_4ps_post_process( + dets.copy(), + [meta['c']], + [meta['s']], + meta['out_height'], + meta['out_width'], + self.opt.num_classes, + ) + for j in range(1, self.num_classes + 1): + dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, k) + dets[0][j][:, :8] /= scale + else: + ret = {} + dets = [] + for j in range(1, self.num_classes + 1): + ret[j] = np.array([0] * k, dtype=np.float32) # .reshape(-1, k) + dets.append(ret) + return dets[0], corner + + def merge_outputs(self, detections): + results = {} + for j in range(1, self.num_classes + 1): + results[j] = np.concatenate( + [detection[j] for detection in detections], axis=0 + ).astype(np.float32) + # if len(self.scales) > 1 or self.opt.nms: + # results[j] = pnms(results[j],self.opt.nms_thresh) + shape_num = 0 + for j in range(1, self.num_classes + 1): + shape_num = shape_num + len(results[j]) + if shape_num != 0: + # print(np.array(results[1])) + scores = np.hstack( + [results[j][:, 8] for j in range(1, self.num_classes + 1)] + ) + else: + scores = [] + if len(scores) > self.max_per_image: + kth = len(scores) - self.max_per_image + thresh = np.partition(scores, kth)[kth] + for j in range(1, self.num_classes + 1): + keep_inds = results[j][:, 8] >= thresh + results[j] = results[j][keep_inds] + return results + + def debug(self, debugger, images, dets, output, scale=1): + # detection = dets.detach().cpu().numpy().copy() + detection = dets.copy() + detection[:, :, :8] *= self.opt.down_ratio + for i in range(1): + img = images[i].detach().cpu().numpy().transpose(1, 2, 0) + img = ((img * self.std + self.mean) * 255).astype(np.uint8) + pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy()) + debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale)) + debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale)) + # pdb.set_trace() + for k in range(len(dets[i])): + if detection[i, k, 8] > self.opt.center_thresh: + debugger.add_4ps_coco_bbox( + detection[i, k, :8], + detection[i, k, -1], + detection[i, k, 8], + img_id='out_pred_{:.1f}'.format(scale), + ) + + def show_results(self, debugger, image, results, Corners, image_name): + debugger.add_img(image, img_id='ctdet') + count = 0 + for j in range(1, self.num_classes + 1): + for bbox in results[j]: + if bbox[8] > self.opt.scores_thresh: + count += 1 + # print("bbox info:",j-1, bbox.tolist()) + # print(j-1) + debugger.add_4ps_coco_bbox( + bbox, j - 1, bbox[8], show_txt=True, img_id='ctdet' + ) + debugger.save_all_imgs(image_name, './outputs/') diff --git a/pix2text/doc_xl_layout/detectors/detector_factory.py b/pix2text/doc_xl_layout/detectors/detector_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..30080aaf46e75e64defa848e1ff0811c524e3ffc --- /dev/null +++ b/pix2text/doc_xl_layout/detectors/detector_factory.py @@ -0,0 +1,7 @@ +# coding: utf-8 + +from .ctdet_subfield import CtdetDetector_Subfield + +detector_factory = { + 'ctdet_subfield': CtdetDetector_Subfield +} diff --git a/pix2text/doc_xl_layout/doc_xl_layout_parser.py b/pix2text/doc_xl_layout/doc_xl_layout_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..62820d6935e070fe466e3b60363fc069bb169138 --- /dev/null +++ b/pix2text/doc_xl_layout/doc_xl_layout_parser.py @@ -0,0 +1,478 @@ +# coding: utf-8 +# Adapted from https://github.com/AlibabaResearch/AdvancedLiterateMachinery +import json +import os +import shutil +from collections import defaultdict +from copy import deepcopy, copy +from 
pathlib import Path +import logging +from typing import Union, List, Dict, Any, Optional + +import numpy as np +from PIL import Image + +from .opts import opts +from .huntie_subfield import Huntie_Subfield +from .detectors.detector_factory import detector_factory +from .wrapper import wrap_result +from ..consts import MODEL_VERSION +from ..layout_parser import LayoutParser, ElementType +from ..utils import ( + select_device, + read_img, + data_dir, + save_layout_img, + clipbox, + overlap, + box2list, + x_overlap, + merge_boxes, + prepare_model_files2, +) + +logger = logging.getLogger(__name__) + +CATEGORIES = { + "title": 0, + "figure": 1, + "plain text": 2, + "header": 3, + "page number": 4, + "footnote": 5, + "footer": 6, + "table": 7, + "table caption": 8, + "figure caption": 9, + "equation": 10, + "full column": 11, + "sub column": 12, +} +CATEGORY_MAPPING = [''] * len(CATEGORIES) +for cate, idx in CATEGORIES.items(): + CATEGORY_MAPPING[idx] = cate + + +class DocXLayoutOutput: + def __init__(self, layout_detection_info, subfield_detection_info, message=''): + self.layout_detection_info = layout_detection_info + self.subfield_detection_info = subfield_detection_info + self.message = message + + def to_json(self): + return wrap_result( + self.layout_detection_info, self.subfield_detection_info, CATEGORY_MAPPING + ) + + +class DocXLayoutParser(LayoutParser): + ignored_types = {'footnote', 'footer', 'page number'} + type_mappings = { + 'title': ElementType.TITLE, + 'figure': ElementType.FIGURE, + 'plain text': ElementType.TEXT, + 'header': ElementType.TEXT, + 'table': ElementType.TABLE, + 'table caption': ElementType.TEXT, + 'figure caption': ElementType.TEXT, + 'equation': ElementType.FORMULA, + } + # types that are isolated and usually don't cross different columns. 
They should not be merged with other elements + is_isolated = {'header', 'table caption', 'figure caption', 'equation'} + + def __init__( + self, + device: str = None, + model_fp: Optional[str] = None, + root: Union[str, Path] = data_dir(), + **kwargs, + ): + if model_fp is None: + model_fp = self._prepare_model_files(root, None) + new_params = { + 'task': 'ctdet_subfield', + 'arch': 'dlav0subfield_34', + 'input_res': 768, + 'num_classes': 13, + 'scores_thresh': kwargs.get('scores_thresh', 0.35), + 'load_model': str(model_fp), + 'debug': kwargs.get('debug', 0), + } + + opt = opts().parse(new_params) + opt = opts().update_dataset_info_and_set_heads(opt, Huntie_Subfield) + opt.device = select_device(device) + + Detector = detector_factory[opt.task] + detector = Detector(opt) + self.detector = detector + self.opt = opt + logger.debug("DocXLayoutParser parameters %s", self.opt) + + @classmethod + def from_config(cls, configs: Optional[dict] = None, device: str = None, **kwargs): + configs = copy(configs or {}) + device = select_device(device) + model_fp = configs.pop('model_fp', None) + root = configs.pop('root', data_dir()) + configs.pop('device', None) + + return cls(device=device, model_fp=model_fp, root=root, **configs) + + def _prepare_model_files(self, root, model_info): + model_root_dir = Path(root).expanduser() / MODEL_VERSION + model_dir = model_root_dir / 'layout-parser' + model_fp = model_dir / 'DocXLayout_231012.pth' + if model_fp.exists(): + return model_fp + model_fp = prepare_model_files2( + model_fp_or_dir=model_fp, + remote_repo="breezedeus/pix2text-layout", + file_or_dir="file", + ) + return model_fp + + def convert_eval_format(self, all_bboxes, opt): + layout_detection_items = [] + subfield_detection_items = [] + for cls_ind in all_bboxes: + for box in all_bboxes[cls_ind]: + if box[8] < opt.scores_thresh: + continue + pts = np.round(box).tolist()[:8] + score = box[8] + category_id = box[9] + # direction_id = box[10] + # secondary_id = box[11] + detection = { + "category_id": int(category_id), + # "secondary_id": int(secondary_id), + # "direction_id": int(direction_id), + "poly": pts, + "score": float("{:.2f}".format(score)), + } + if cls_ind in (12, 13): + subfield_detection_items.append(detection) + else: + layout_detection_items.append(detection) + return layout_detection_items, subfield_detection_items + + def parse( + self, + img: Union[str, Path, Image.Image], + table_as_image: bool = False, + **kwargs, + ) -> (List[Dict[str, Any]], Dict[str, Any]): + """ + + Args: + img (): + table_as_image (): + **kwargs (): + * save_debug_res (str): if `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save + * expansion_margin (int): expansion margin + + Returns: + + """ + if isinstance(img, Image.Image): + img0 = img.convert('RGB') + else: + img0 = read_img(img, return_type='Image') + img_width, img_height = img0.size + try: + # to np.array, RGB -> BGR + ret = self.detector.run(np.array(img0)[:, :, ::-1]) + layout_detection_info, subfield_detection_info = self.convert_eval_format( + ret['results'], self.opt + ) + out = DocXLayoutOutput( + layout_detection_info, subfield_detection_info, message='success' + ) + except Exception as e: + logger.warning("DocXLayoutPredictor Error %s", repr(e)) + out = DocXLayoutOutput([], [], message=repr(e)) + + layout_out = out.to_json() + debug_dir = None + if kwargs.get('save_debug_res', None): + debug_dir = Path(kwargs.get('save_debug_res')) + debug_dir.mkdir(exist_ok=True, parents=True) + 
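+# --- Added commentary (editor's sketch, not part of the original patch) ---
+# The block below dumps the wrapped detection result to layout_out.json
+# whenever the caller passes save_debug_res. A minimal usage sketch of the
+# whole parse() pipeline, assuming the default model download succeeds and
+# 'page.png' is a hypothetical input image:
+#
+#     parser = DocXLayoutParser(device='cpu')
+#     layout, column_meta = parser.parse(
+#         'page.png',
+#         table_as_image=False,
+#         save_debug_res='./debug',   # also saves ./debug/layout_res.jpg
+#         expansion_margin=8,
+#     )
+#     for element in layout:
+#         print(element['type'], element['col_number'], element['score'])
+# --------------------------------------------------------------------------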
if debug_dir is not None: + with open(debug_dir / 'layout_out.json', 'w', encoding='utf-8') as f: + json.dump( + layout_out, f, indent=2, ensure_ascii=False, + ) + if layout_out: + layout_out = self._preprocess_outputs(img0, layout_out) + layout_out, column_meta = self._format_outputs( + img0, layout_out, table_as_image + ) + else: + layout_out, column_meta = [], {} + + layout_out = self._merge_overlapped_boxes(layout_out) + + expansion_margin = kwargs.get('expansion_margin', 8) + layout_out = self._expand_boxes( + layout_out, expansion_margin, height=img_height, width=img_width + ) + + save_layout_fp = kwargs.get( + 'save_layout_res', + debug_dir / 'layout_res.jpg' if debug_dir is not None else None, + ) + if save_layout_fp: + element_type_list = [t for t in ElementType] + save_layout_img( + img0, + element_type_list, + layout_out, + save_path=save_layout_fp, + key='position', + ) + + return layout_out, column_meta + + def _preprocess_outputs(self, img0, outs): + width, height = img0.size + + subfields = outs['subfields'] + for column_info in subfields: + layout_out = column_info['layouts'] + if len(layout_out) < 2: + continue + for idx, cur_box_info in enumerate(layout_out[:-1]): + next_box_info = layout_out[idx + 1] + cur_box_ymax = cur_box_info['pts'][-1] + next_box_ymin = next_box_info['pts'][1] + if ( + cur_box_info['category'] == 'figure' + and next_box_info['category'] == 'figure caption' + and -6 < next_box_ymin - cur_box_ymax < 80 + ): + new_xmin = min(cur_box_info['pts'][0], next_box_info['pts'][0]) + # new_xmin = max(new_xmin, 0, col_pts[0]) + new_xmax = max(cur_box_info['pts'][2], next_box_info['pts'][2]) + # new_xmax = min(new_xmax, ) + new_ymin = max(0, cur_box_info['pts'][1]) + new_ymax = max(cur_box_ymax, next_box_ymin - 16) + new_box = [ + new_xmin, + new_ymin, + new_xmax, + new_ymin, + new_xmax, + new_ymax, + new_xmin, + new_ymax, + ] + layout_out[idx]['pts'] = new_box + # FIXME: first figure caption, then figure + + return outs + + def _format_outputs(self, img0, out, table_as_image: bool): + width, height = img0.size + + column_meta = defaultdict(dict) + final_out = [] + subfields = out['subfields'] + col_number = 0 + for column_info in subfields: + if column_info['category'] == 'sub column': + cur_col_number = col_number + col_number += 1 + elif column_info['category'] == 'full column': # == 'full column' + cur_col_number = -1 + else: # '其他' + cur_col_number = -2 + box = clipbox(np.array(column_info['pts']).reshape(4, 2), height, width) + column_meta[cur_col_number]['position'] = box + column_meta[cur_col_number]['score'] = column_info['confidence'] + layout_out = column_info['layouts'] + for box_info in layout_out: + image_type = box_info['category'] + isolated = image_type in self.is_isolated + if image_type in self.ignored_types: + image_type = ElementType.IGNORED + else: + image_type = self.type_mappings.get(image_type, ElementType.UNKNOWN) + if table_as_image and image_type == ElementType.TABLE: + image_type = ElementType.FIGURE + box = clipbox(np.array(box_info['pts']).reshape(4, 2), height, width) + final_out.append( + { + 'type': image_type, + 'position': box, + 'score': box_info['confidence'], + 'col_number': cur_col_number, + 'isolated': isolated, + } + ) + + if -2 in column_meta and -1 in column_meta: + filtered_out = [] + full_column_box = column_meta[-1]['position'] + full_column_xmin, full_column_xmax = ( + full_column_box[0, 0], + full_column_box[1, 0], + ) + for box_info in final_out: + if box_info['col_number'] != -2: + filtered_out.append(box_info) + 
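+# Added commentary (editor's note): boxes already assigned to a sub column or
+# the full column pass through unchanged; only the leftover col_number == -2
+# boxes reach the geometry test below. That test flags a TEXT box as IGNORED
+# when it lies entirely left or right of the full-column span and is more than
+# five times taller than it is wide -- e.g. a 20x300 px vertical margin strip
+# (300 > 5 * 20) is dropped, while a 200x40 px fragment is kept.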
continue + cur_box = box_info['position'] + cur_box_xmin, cur_box_xmax = cur_box[0, 0], cur_box[1, 0] + cur_box_ymin, cur_box_ymax = cur_box[0, 1], cur_box[2, 1] + if ( + box_info['type'] == ElementType.TEXT + and ( + cur_box_xmax < full_column_xmin + or cur_box_xmin > full_column_xmax + ) + and cur_box_ymax - cur_box_ymin > 5 * (cur_box_xmax - cur_box_xmin) + ): # unnecessary block + box_info['type'] = ElementType.IGNORED + filtered_out.append(box_info) + + final_out = filtered_out + + # handle abnormal elements (col_number == -2) + if -2 in column_meta: + column_meta.pop(-2) + # guess which column the box belongs to + for _box_info in final_out: + if _box_info['col_number'] != -2: + continue + overlap_vals = [] + for col_number, col_info in column_meta.items(): + overlap_val = x_overlap(_box_info, col_info, key='position') + overlap_vals.append([col_number, overlap_val]) + if overlap_vals: + overlap_vals.sort(key=lambda x: (x[1], x[0]), reverse=True) + match_col_number = overlap_vals[0][0] + _box_info['col_number'] = match_col_number + else: + _box_info['col_number'] = 0 + + return final_out, column_meta + + @classmethod + def _merge_overlapped_boxes(cls, layout_out): + """ + Detected bounding boxes may overlap; merge these overlapping boxes into a single one. + """ + if len(layout_out) < 2: + return layout_out + layout_out = deepcopy(layout_out) + + def _overlay_vertically(box1, box2): + if x_overlap(box1, box2, key=None) < 0.8: + return False + box1 = box2list(box1) + box2 = box2list(box2) + # 判断是否有交集 + if box1[3] <= box2[1] or box2[3] <= box1[1]: + return False + # 计算交集的高度 + y_min = max(box1[1], box2[1]) + y_max = min(box1[3], box2[3]) + return y_max - y_min > 10 + + for anchor_idx, anchor_box_info in enumerate(layout_out): + if anchor_box_info['type'] != ElementType.TEXT or anchor_box_info.get( + 'used', False + ): + continue + for cand_idx, cand_box_info in enumerate(layout_out): + if anchor_idx == cand_idx: + continue + if cand_box_info['type'] != ElementType.TEXT or cand_box_info.get( + 'used', False + ): + continue + if not _overlay_vertically( + anchor_box_info['position'], cand_box_info['position'] + ): + continue + anchor_box_info['position'] = merge_boxes( + anchor_box_info['position'], cand_box_info['position'] + ) + cand_box_info['used'] = True + + return [box_info for box_info in layout_out if not box_info.get('used', False)] + + @classmethod + def _expand_boxes(cls, layout_out, expansion_margin, height, width): + """ + Expand boxes with some margin to get better results + Args: + layout_out (): layout_out + expansion_margin (int): expansion margin + height (int): height of the image + width (int): width of the image + + Returns: layout_out with expanded boxes + + """ + + def _overlap_with_some_box(idx, anchor_box): + # anchor_box = layout_out[idx] + return any( + [ + overlap(anchor_box, box_info['position'], key=None) > 0 + for idx2, box_info in enumerate(layout_out) + if idx2 != idx + ] + ) + + for idx, box_info in enumerate(layout_out): + if box_info['type'] not in ( + ElementType.TEXT, + ElementType.TITLE, + ElementType.FORMULA, + ): + continue + if _overlap_with_some_box(idx, box_info['position']): + continue + + # expand xmin and xmax + new_box = box_info['position'].copy() + xmin, xmax = new_box[0, 0], new_box[1, 0] + xmin -= expansion_margin + xmax += expansion_margin + if xmin <= 8: + xmin = 0 + if xmax + 8 >= width: + xmax = width + new_box[0, 0] = new_box[3, 0] = xmin + new_box = clipbox(new_box, height, width) + if not _overlap_with_some_box(idx, new_box): + 
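+# Added commentary (editor's note): each edge is committed independently --
+# the widened box is kept only if it still overlaps no other box, so a
+# rejected xmin expansion does not block the xmax expansion that follows.
+# With the default expansion_margin of 8, an xmin of 20 shrinks to 12, while
+# an xmin of 10 shrinks to 2 and then snaps to 0, since any result within
+# 8 px of the image border is pushed onto the border itself.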
layout_out[idx]['position'] = new_box + new_box = layout_out[idx]['position'].copy() + new_box[1, 0] = new_box[2, 0] = xmax + new_box = clipbox(new_box, height, width) + if not _overlap_with_some_box(idx, new_box): + layout_out[idx]['position'] = new_box + + # expand ymin and ymax + new_box = layout_out[idx]['position'].copy() + ymin, ymax = new_box[0, 1], new_box[2, 1] + ymin -= expansion_margin + ymax += expansion_margin + if ymin <= 8: + ymin = 0 + if ymax + 8 >= height: + ymax = height + new_box[0, 1] = new_box[1, 1] = ymin + new_box = clipbox(new_box, height, width) + if not _overlap_with_some_box(idx, new_box): + layout_out[idx]['position'] = new_box + new_box = layout_out[idx]['position'].copy() + new_box[2, 1] = new_box[3, 1] = ymax + new_box = clipbox(new_box, height, width) + if not _overlap_with_some_box(idx, new_box): + layout_out[idx]['position'] = new_box + + return layout_out diff --git a/pix2text/doc_xl_layout/external/__init__.py b/pix2text/doc_xl_layout/external/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pix2text/doc_xl_layout/external/shapelyNMS.py b/pix2text/doc_xl_layout/external/shapelyNMS.py new file mode 100644 index 0000000000000000000000000000000000000000..4047f26d57e68d15ad1a1ee6379b4a80b37cd374 --- /dev/null +++ b/pix2text/doc_xl_layout/external/shapelyNMS.py @@ -0,0 +1,75 @@ +import numpy as np + + +def pnms(dets, thresh): + if len(dets) < 2: + return dets + scores = dets[:, 8] + index_keep = [] + keep = [] + for i in range(len(dets)): + box = dets[i] + if box[8] < thresh: + continue + max_score_index = -1 + ctx = (dets[i][0] + dets[i][2] + dets[i][4] + dets[i][6]) / 4 + cty = (dets[i][1] + dets[i][3] + dets[i][5] + dets[i][7]) / 4 + for j in range(len(dets)): + if i == j or dets[j][8] < thresh: + continue + x1, y1 = dets[j][0], dets[j][1] + x2, y2 = dets[j][2], dets[j][3] + x3, y3 = dets[j][4], dets[j][5] + x4, y4 = dets[j][6], dets[j][7] + a = (x2 - x1) * (cty - y1) - (y2 - y1) * (ctx - x1) + b = (x3 - x2) * (cty - y2) - (y3 - y2) * (ctx - x2) + c = (x4 - x3) * (cty - y3) - (y4 - y3) * (ctx - x3) + d = (x1 - x4) * (cty - y4) - (y1 - y4) * (ctx - x4) + if ((a > 0 and b > 0 and c > 0 and d > 0) or (a < 0 and b < 0 and c < 0 and d < 0)): + if dets[i][8] > dets[j][8] and max_score_index < 0: + max_score_index = i + elif dets[i][8] < dets[j][8]: + max_score_index = -2 + break + if max_score_index > -1: + index_keep.append(max_score_index) + elif max_score_index == -1: + index_keep.append(i) + for i in range(0, len(index_keep)): + keep.append(dets[index_keep[i]]) + + return np.array(keep) + + ''' + pts = [] + for i in range(dets.shape[0]): + pts.append([dets[i][0:2],dets[i][2:4],dets[i][4:6],dets[i][6:8]]) + + areas = np.zeros(scores.shape) + order = scores.argsort()[::-1] + inter_areas = np.zeros((scores.shape[0],scores.shape[0])) + + for i in range(0,len(pts)): + poly = Polygon(pts[i]) + areas[i] = poly.area + + for j in range(i, len(pts)): + polyj = Polygon(pts[j]) + try: + inS = poly.intersection(polyj) + except Exception as e: + print(pts[i],'\n',pts[j]) + return dets + inter_areas[i][j] = inS.area + inter_areas[j][i] = inS.area + + keep = [] + while order.size > 0: + i = order[0] + keep.append(dets[i]) + ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]]) + inds = np.where(ovr <= thresh)[0] + order = order[inds + 1] + + return keep + ''' diff --git a/pix2text/doc_xl_layout/huntie_subfield.py 
b/pix2text/doc_xl_layout/huntie_subfield.py new file mode 100644 index 0000000000000000000000000000000000000000..b6908a1f3b004723b2f260e5975f65d2f866fb63 --- /dev/null +++ b/pix2text/doc_xl_layout/huntie_subfield.py @@ -0,0 +1,13 @@ +import numpy as np +import torch.utils.data as data + + +class Huntie_Subfield(data.Dataset): + num_classes = 13 + num_secondary_classes = 3 + default_resolution = [768, 768] + mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32).reshape(1, 1, 3) + std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32).reshape(1, 1, 3) + + + diff --git a/pix2text/doc_xl_layout/opts.py b/pix2text/doc_xl_layout/opts.py new file mode 100644 index 0000000000000000000000000000000000000000..80315e2dd688438fa57817e4048b063d44f6d6b9 --- /dev/null +++ b/pix2text/doc_xl_layout/opts.py @@ -0,0 +1,410 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os + + +class opts(object): + def __init__(self): + self.parser = argparse.ArgumentParser() + # basic experiment setting + self.parser.add_argument('task', default='ctdet', + help='ctdet | ddd | multi_pose | exdet | ctdet_subfield') + self.parser.add_argument('--dataset', default='huntie', + help='coco | kitti | coco_hp | pascal | huntie | structure') + self.parser.add_argument('--test', action='store_true') + self.parser.add_argument('--data_src', default="default", type=str, + help='The path of input data.') + self.parser.add_argument('--exp_id', default='default', type=str) + self.parser.add_argument('--vis_corner', type=int, default=0, + help='vis corner or not' + '0: do not vis corner' + '1: vis corner') + self.parser.add_argument('--convert_onnx', type=int, default=0, + help='0: donot convert' + '1: convert pytorch model to onnx') + self.parser.add_argument('--onnx_path', type=str, default="auto", + help='path of output onnx file.') + self.parser.add_argument('--debug', type=int, default=0, + help='level of visualization.' + '1: only show the final detection results' + '2: show the network output features' + '3: use matplot to display' # useful when lunching training with ipython notebook + '4: save all visualizations to disk') + self.parser.add_argument('--load_model', default='', + help='path to pretrained model') + self.parser.add_argument('--resume', action='store_true', + help='resume an experiment. ' + 'Reloaded the optimizer parameter and ' + 'set load_model to model_last.pth ' + 'in the exp dir if load_model is empty.') + + # system + self.parser.add_argument('--gpus', default='-1', + help='-1 for CPU, use comma for multiple gpus') + self.parser.add_argument('--num_workers', type=int, default=16, + help='dataloader threads. 
0 for single-thread.') + self.parser.add_argument('--not_cuda_benchmark', action='store_true', + help='disable when the input size is not fixed.') + self.parser.add_argument('--seed', type=int, default=317, + help='random seed') # from CornerNet + + # log + self.parser.add_argument('--print_iter', type=int, default=0, + help='disable progress bar and print to screen.') + self.parser.add_argument('--hide_data_time', action='store_true', + help='not display time during training.') + self.parser.add_argument('--save_all', action='store_true', + help='save model to disk every 5 epochs.') + self.parser.add_argument('--metric', default='loss', + help='main metric to save best model') + self.parser.add_argument('--vis_thresh', type=float, default=0.3, + help='visualization threshold.') + self.parser.add_argument('--nms_thresh', type=float, default=0.3, + help='nms threshold.') + self.parser.add_argument('--corner_thresh', type=float, default=0.3, + help='threshold for corner.') + self.parser.add_argument('--debugger_theme', default='white', + choices=['white', 'black']) + + # model + self.parser.add_argument('--arch', default='dla_34', + help='model architecture. Currently tested' + 'res_18 | res_101 | resdcn_18 | resdcn_101 |' + 'dlav0_34 | dla_34 | hourglass') + self.parser.add_argument('--head_conv', type=int, default=-1, + help='conv layer channels for output head' + '0 for no conv layer' + '-1 for default setting: ' + '64 for resnets and 256 for dla.') + self.parser.add_argument('--down_ratio', type=int, default=4, + help='output stride. Currently only supports 4.') + + # input + self.parser.add_argument('--input_res', type=int, default=-1, + help='input height and width. -1 for default from ' + 'dataset. Will be overriden by input_h | input_w') + self.parser.add_argument('--input_h', type=int, default=-1, + help='input height. -1 for default from dataset.') + self.parser.add_argument('--input_w', type=int, default=-1, + help='input width. 
-1 for default from dataset.') + + # train + self.parser.add_argument('--lr', type=float, default=1.25e-4, + help='learning rate for batch size 32.') + self.parser.add_argument('--lr_step', type=str, default='80', + help='drop learning rate by 10.') + self.parser.add_argument('--NotFixList', type=str, default='', + help='not fix layer name.') + self.parser.add_argument('--num_epochs', type=int, default=90, + help='total training epochs.') + self.parser.add_argument('--batch_size', type=int, default=32, + help='batch size') + self.parser.add_argument('--master_batch_size', type=int, default=-1, + help='batch size on the master gpu.') + self.parser.add_argument('--num_iters', type=int, default=-1, + help='default: #samples / batch_size.') + self.parser.add_argument('--val_intervals', type=int, default=5, + help='number of epochs to run validation.') + self.parser.add_argument('--trainval', action='store_true', + help='include validation in training and test on test set') + self.parser.add_argument('--negative', action='store_true', + help='flip data augmentation.') + self.parser.add_argument('--adamW', action='store_true', + help='using adamW or adam.') + + # test + self.parser.add_argument('--save_dir', default="default", type=str, + help='The path of output data.') + self.parser.add_argument('--flip_test', action='store_true', + help='flip data augmentation.') + self.parser.add_argument('--test_scales', type=str, default='1', + help='multi scale test augmentation.') + self.parser.add_argument('--nms', action='store_false', + help='run nms in testing.') + self.parser.add_argument('--K', type=int, default=100, + help='max number of output objects.') + self.parser.add_argument('--fix_res', action='store_true', + help='fix testing resolution or keep the original resolution') + self.parser.add_argument('--keep_res', action='store_true', + help='keep the original resolution during validation.') + + # dataset + self.parser.add_argument('--not_rand_crop', action='store_true', + help='not use the random crop data augmentation from CornerNet.') + self.parser.add_argument('--shift', type=float, default=0.1, + help='when not using random crop apply shift augmentation.') + self.parser.add_argument('--scale', type=float, default=0.4, + help='when not using random crop apply scale augmentation.') + self.parser.add_argument('--rotate', type=float, default=0, + help='when not using random crop apply rotation augmentation.') + self.parser.add_argument('--flip', type=float, default=0.5, + help='probability of applying flip augmentation.') + self.parser.add_argument('--maskvisual', type=float, default=0., + help='probability of masking image.') + self.parser.add_argument('--maskgrid', type=float, default=0., + help='probability of masking grid, only available when visual is not masked.') + self.parser.add_argument('--no_color_aug', action='store_true', + help='not use the color augmenation from CornerNet') + self.parser.add_argument('--MK', default=500, + help='max corner number') + self.parser.add_argument('--rot', action='store_false', + help='rotate image') + self.parser.add_argument('--warp', action='store_false', + help='warp image') + self.parser.add_argument('--normal_padding', action='store_false', + help='normal_padding image') + self.parser.add_argument('--extra_channel', action='store_true', + help='concat edge channel to the input image') + self.parser.add_argument('--init_emb', type=str, default='', + help='embedding layer.') + self.parser.add_argument('--grid_type', type=str, 
default='char_point', + help='type of grid, candidates: char_point, char_box (CharGrid), line (WordGrid).') + self.parser.add_argument('--finetune_emb', action='store_true', + help='embedding finetune') + self.parser.add_argument('--dic', type=str, default='', + help='dic file for grid.') + self.parser.add_argument('--sample_limit', type=int, default=-1, + help='limit samples for training') + + # multi_pose + self.parser.add_argument('--aug_rot', type=float, default=0, + help='probability of applying rotation augmentation.') + # ddd + self.parser.add_argument('--aug_ddd', type=float, default=0.5, + help='probability of applying crop augmentation.') + self.parser.add_argument('--rect_mask', action='store_true', + help='for ignored object, apply mask on the ' + 'rectangular region or just center point.') + self.parser.add_argument('--kitti_split', default='3dop', + help='different validation split for kitti: ' + '3dop | subcnn') + + # loss + self.parser.add_argument('--mse_loss', action='store_true', + help='use mse loss or focal loss to train keypoint heatmaps.') + # ctdet + self.parser.add_argument('--num_classes', type=int, default=-1, + help='the number of main category. -1 means use default from dataset.') + self.parser.add_argument('--num_secondary_classes', type=int, default=-1, + help='the number of secondary category. -1 means use default from dataset.') + self.parser.add_argument('--reg_loss', default='l1', + help='regression loss: sl1 | l1 | l2') + self.parser.add_argument('--hm_weight', type=float, default=1, + help='loss weight for keypoint heatmaps.') + self.parser.add_argument('--cls_weight', type=float, default=1, + help='loss weight for keypoint heatmaps.') + self.parser.add_argument('--ftype_weight', type=float, default=1, + help='loss weight for keypoint heatmaps.') + self.parser.add_argument('--mk_weight', type=float, default=1, + help='loss weight for corner keypoint heatmaps.') + self.parser.add_argument('--off_weight', type=float, default=1, + help='loss weight for keypoint local offsets.') + self.parser.add_argument('--wh_weight', type=float, default=1, + help='loss weight for bounding box size.') + # multi_pose + self.parser.add_argument('--hp_weight', type=float, default=1, + help='loss weight for human pose offset.') + self.parser.add_argument('--hm_hp_weight', type=float, default=1, + help='loss weight for human keypoint heatmap.') + # ddd + self.parser.add_argument('--dep_weight', type=float, default=1, + help='loss weight for depth.') + self.parser.add_argument('--dim_weight', type=float, default=1, + help='loss weight for 3d bounding box size.') + self.parser.add_argument('--rot_weight', type=float, default=1, + help='loss weight for orientation.') + self.parser.add_argument('--peak_thresh', type=float, default=0.1) + + # task + # ctdet + self.parser.add_argument('--norm_wh', action='store_true', + help='L1(\hat(y) / y, 1) or L1(\hat(y), y)') + self.parser.add_argument('--dense_wh', action='store_true', + help='apply weighted regression near center or ' + 'just apply regression on center point.') + self.parser.add_argument('--cat_spec_wh', action='store_true', + help='category specific bounding box size.') + self.parser.add_argument('--not_reg_offset', action='store_true', + help='not regress local offset.') + # exdet + self.parser.add_argument('--agnostic_ex', action='store_true', + help='use category agnostic extreme points.') + self.parser.add_argument('--scores_thresh', type=float, default=0.3, + help='threshold for extreme point heatmap.') + 
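+# Added commentary (editor's sketch, not part of the original patch): parse()
+# at the bottom of this class accepts either an argv-style list or a plain
+# dict; the dict path parses only the task name and then overwrites
+# opt.__dict__ with the remaining keys. DocXLayoutParser drives it with a
+# dict, roughly:
+#
+#     opt = opts().parse({
+#         'task': 'ctdet_subfield',
+#         'arch': 'dlav0subfield_34',
+#         'input_res': 768,
+#         'num_classes': 13,
+#         'scores_thresh': 0.35,
+#         'load_model': '/path/to/DocXLayout_231012.pth',  # placeholder path
+#     })
+#     opt = opts().update_dataset_info_and_set_heads(opt, Huntie_Subfield)
+#
+# With those values the 'ctdet_subfield' branch resolves the heads to
+# {'hm': 11, 'cls': 4, 'ftype': 3, 'wh': 8, 'hm_sub': 2, 'wh_sub': 8,
+#  'reg': 2, 'reg_sub': 2}.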
self.parser.add_argument('--center_thresh', type=float, default=0.3, + help='threshold for centermap.') + self.parser.add_argument('--aggr_weight', type=float, default=0.0, + help='edge aggregation weight.') + # multi_pose + self.parser.add_argument('--dense_hp', action='store_true', + help='apply weighted pose regression near center ' + 'or just apply regression on center point.') + self.parser.add_argument('--not_hm_hp', action='store_true', + help='not estimate human joint heatmap, ' + 'directly use the joint offset from center.') + self.parser.add_argument('--not_reg_hp_offset', action='store_true', + help='not regress local offset for ' + 'human joint heatmaps.') + self.parser.add_argument('--not_reg_bbox', action='store_true', + help='not regression bounding box size.') + + # ground truth validation + self.parser.add_argument('--eval_oracle_hm', action='store_true', + help='use ground center heatmap.') + self.parser.add_argument('--eval_oracle_mk', action='store_true', + help='use ground corner heatmap.') + self.parser.add_argument('--eval_oracle_wh', action='store_true', + help='use ground truth bounding box size.') + self.parser.add_argument('--eval_oracle_offset', action='store_true', + help='use ground truth local heatmap offset.') + self.parser.add_argument('--eval_oracle_kps', action='store_true', + help='use ground truth human pose offset.') + self.parser.add_argument('--eval_oracle_hmhp', action='store_true', + help='use ground truth human joint heatmaps.') + self.parser.add_argument('--eval_oracle_hp_offset', action='store_true', + help='use ground truth human joint local offset.') + self.parser.add_argument('--eval_oracle_dep', action='store_true', + help='use ground truth depth.') + + def parse(self, args=None): + if isinstance(args, dict): + task_name = args.get("task", "ctdet") + opt = self.parser.parse_args(args=[task_name]) + opt.__dict__.update(args) + else: + opt = self.parser.parse_args(args=args) + + # import json + # with open("task_config.json", "w") as f: + # json.dump(opt.__dict__, f, ensure_ascii=False, indent=4) + + opt.gpus_str = opt.gpus + opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')] + opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >= 0 else [-1] + opt.lr_step = [int(i) for i in opt.lr_step.split(',')] + opt.test_scales = [float(i) for i in opt.test_scales.split(',')] + + opt.fix_res = not opt.keep_res + print('Fix size testing.' 
if opt.fix_res else 'Keep resolution testing.') + opt.reg_offset = not opt.not_reg_offset + opt.reg_bbox = not opt.not_reg_bbox + opt.hm_hp = not opt.not_hm_hp + opt.reg_hp_offset = (not opt.not_reg_hp_offset) and opt.hm_hp + + if opt.head_conv == -1: # init default head_conv + opt.head_conv = 256 if 'dla' in opt.arch else 64 + opt.pad = 0 # opt.pad = 127 if 'hourglass' in opt.arch else 31 + opt.num_stacks = 2 if opt.arch == 'hourglass' else 1 + + if opt.trainval: + opt.val_intervals = 100000000 + + if opt.debug > 0: + opt.num_workers = 0 + opt.batch_size = 1 + opt.gpus = [opt.gpus[0]] + opt.master_batch_size = -1 + + if opt.master_batch_size == -1: + opt.master_batch_size = opt.batch_size // len(opt.gpus) + rest_batch_size = (opt.batch_size - opt.master_batch_size) + opt.chunk_sizes = [opt.master_batch_size] + for i in range(len(opt.gpus) - 1): + slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1) + if i < rest_batch_size % (len(opt.gpus) - 1): + slave_chunk_size += 1 + opt.chunk_sizes.append(slave_chunk_size) + print('training chunk_sizes:', opt.chunk_sizes) + + opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..') + opt.data_dir = os.path.join(opt.root_dir, 'data') if opt.data_src == "default" else opt.data_src + opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task) + # import pdb; pdb.set_trace() + opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id) if opt.save_dir == "default" else os.path.join(opt.save_dir, opt.exp_id) + opt.debug_dir = os.path.join(opt.save_dir, 'debug') + print('The output will be saved to ', opt.save_dir) + + if opt.resume and opt.load_model == '': + model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') \ + else opt.save_dir + opt.load_model = os.path.join(model_path, 'model_last.pth') + return opt + + def update_dataset_info_and_set_heads(self, opt, dataset): + input_h, input_w = dataset.default_resolution + opt.mean, opt.std = dataset.mean, dataset.std + + if opt.num_classes == -1: + opt.num_classes = dataset.num_classes + if opt.num_secondary_classes == -1: + opt.num_secondary_classes = dataset.num_secondary_classes + + # input_h(w): opt.input_h overrides opt.input_res overrides dataset default + input_h = opt.input_res if opt.input_res > 0 else input_h + input_w = opt.input_res if opt.input_res > 0 else input_w + opt.input_h = opt.input_h if opt.input_h > 0 else input_h + opt.input_w = opt.input_w if opt.input_w > 0 else input_w + opt.output_h = opt.input_h // opt.down_ratio + opt.output_w = opt.input_w // opt.down_ratio + opt.input_res = max(opt.input_h, opt.input_w) + opt.output_res = max(opt.output_h, opt.output_w) + + if opt.task == 'exdet': + # assert opt.dataset in ['coco'] + num_hm = 1 if opt.agnostic_ex else opt.num_classes + opt.heads = {'hm_t': num_hm, 'hm_l': num_hm, + 'hm_b': num_hm, 'hm_r': num_hm, + 'hm_c': opt.num_classes} + if opt.reg_offset: + opt.heads.update({'reg_t': 2, 'reg_l': 2, 'reg_b': 2, 'reg_r': 2}) + elif opt.task == 'ddd': + # assert opt.dataset in ['gta', 'kitti', 'viper'] + opt.heads = {'hm': opt.num_classes, 'dep': 1, 'rot': 8, 'dim': 3} + if opt.reg_bbox: + opt.heads.update( + {'wh': 2}) + if opt.reg_offset: + opt.heads.update({'reg': 2}) + elif opt.task == 'ctdet': + # assert opt.dataset in ['pascal', 'coco'] + opt.heads = {'hm': opt.num_classes, 'cls': 4, 'ftype': opt.num_secondary_classes, + 'wh': 8 if not opt.cat_spec_wh else 8 * opt.num_classes} + if opt.reg_offset: + opt.heads.update({'reg': 2}) + elif opt.task == 'ctdet_dualmodal': + # assert opt.dataset in ['pascal', 'coco'] + opt.heads 
= {'hm': opt.num_classes, 'cls': 4, 'ftype': opt.num_secondary_classes, + 'wh': 8 if not opt.cat_spec_wh else 8 * opt.num_classes} + if opt.reg_offset: + opt.heads.update({'reg': 2}) + elif opt.task == 'multi_pose': + # assert opt.dataset in ['coco_hp'] + opt.flip_idx = dataset.flip_idx + opt.heads = {'hm': opt.num_classes, 'wh': 2, 'hps': 34} + if opt.reg_offset: + opt.heads.update({'reg': 2}) + if opt.hm_hp: + opt.heads.update({'hm_hp': 17}) + if opt.reg_hp_offset: + opt.heads.update({'hp_offset': 2}) + elif opt.task == 'ctdet_subfield': + # assert opt.dataset in ['pascal', 'coco'] + opt.heads = {'hm': opt.num_classes-2, 'cls': 4, 'ftype': opt.num_secondary_classes, + 'wh': 8 if not opt.cat_spec_wh else 8 * opt.num_classes, 'hm_sub': 2, 'wh_sub': 8 } + if opt.reg_offset: + opt.heads.update({'reg': 2}) + opt.heads.update({'reg_sub': 2}) + else: + assert 0, 'task not defined!' + print('heads', opt.heads) + return opt + + +if __name__ == '__main__': + print("Testing config ... ") + config_dict = {"batch_size": 32, "dataset": "huntie"} + opt = opts().parse(args=config_dict) + print(opt.__dict__) diff --git a/pix2text/doc_xl_layout/utils/__init__.py b/pix2text/doc_xl_layout/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pix2text/doc_xl_layout/utils/ddd_utils.py b/pix2text/doc_xl_layout/utils/ddd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..335254e1ed5a40d2f59f1854533a191c39a5dc0c --- /dev/null +++ b/pix2text/doc_xl_layout/utils/ddd_utils.py @@ -0,0 +1,139 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import cv2 +import numpy as np + + +def compute_box_3d(dim, location, rotation_y): + # dim: 3 + # location: 3 + # rotation_y: 1 + # return: 8 x 3 + c, s = np.cos(rotation_y), np.sin(rotation_y) + R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32) + l, w, h = dim[2], dim[1], dim[0] + x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] + y_corners = [0, 0, 0, 0, -h, -h, -h, -h] + z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] + + corners = np.array([x_corners, y_corners, z_corners], dtype=np.float32) + corners_3d = np.dot(R, corners) + corners_3d = corners_3d + np.array(location, dtype=np.float32).reshape(3, 1) + return corners_3d.transpose(1, 0) + + +def project_to_image(pts_3d, P): + # pts_3d: n x 3 + # P: 3 x 4 + # return: n x 2 + pts_3d_homo = np.concatenate( + [pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1) + pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0) + pts_2d = pts_2d[:, :2] / pts_2d[:, 2:] + # import pdb; pdb.set_trace() + return pts_2d + + +def compute_orientation_3d(dim, location, rotation_y): + # dim: 3 + # location: 3 + # rotation_y: 1 + # return: 2 x 3 + c, s = np.cos(rotation_y), np.sin(rotation_y) + R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32) + orientation_3d = np.array([[0, dim[2]], [0, 0], [0, 0]], dtype=np.float32) + orientation_3d = np.dot(R, orientation_3d) + orientation_3d = orientation_3d + \ + np.array(location, dtype=np.float32).reshape(3, 1) + return orientation_3d.transpose(1, 0) + + +def draw_box_3d(image, corners, c=(0, 0, 255)): + face_idx = [[0, 1, 5, 4], + [1, 2, 6, 5], + [2, 3, 7, 6], + [3, 0, 4, 7]] + for ind_f in range(3, -1, -1): + f = face_idx[ind_f] + for j in range(4): + cv2.line(image, (corners[f[j], 0], corners[f[j], 1]), + (corners[f[(j + 
1) % 4], 0], corners[f[(j + 1) % 4], 1]), c, 2, lineType=cv2.LINE_AA) + if ind_f == 0: + cv2.line(image, (corners[f[0], 0], corners[f[0], 1]), + (corners[f[2], 0], corners[f[2], 1]), c, 1, lineType=cv2.LINE_AA) + cv2.line(image, (corners[f[1], 0], corners[f[1], 1]), + (corners[f[3], 0], corners[f[3], 1]), c, 1, lineType=cv2.LINE_AA) + return image + + +def unproject_2d_to_3d(pt_2d, depth, P): + # pts_2d: 2 + # depth: 1 + # P: 3 x 4 + # return: 3 + z = depth - P[2, 3] + x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0] + y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1] + pt_3d = np.array([x, y, z], dtype=np.float32) + return pt_3d + + +def alpha2rot_y(alpha, x, cx, fx): + """ + Get rotation_y by alpha + theta - 180 + alpha : Observation angle of object, ranging [-pi..pi] + x : Object center x to the camera center (x-W/2), in pixels + rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi] + """ + rot_y = alpha + np.arctan2(x - cx, fx) + if rot_y > np.pi: + rot_y -= 2 * np.pi + if rot_y < -np.pi: + rot_y += 2 * np.pi + return rot_y + + +def rot_y2alpha(rot_y, x, cx, fx): + """ + Get rotation_y by alpha + theta - 180 + alpha : Observation angle of object, ranging [-pi..pi] + x : Object center x to the camera center (x-W/2), in pixels + rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi] + """ + alpha = rot_y - np.arctan2(x - cx, fx) + if alpha > np.pi: + alpha -= 2 * np.pi + if alpha < -np.pi: + alpha += 2 * np.pi + return alpha + + +def ddd2locrot(center, alpha, dim, depth, calib): + # single image + locations = unproject_2d_to_3d(center, depth, calib) + locations[1] += dim[0] / 2 + rotation_y = alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0]) + return locations, rotation_y + + +def project_3d_bbox(location, dim, rotation_y, calib): + box_3d = compute_box_3d(dim, location, rotation_y) + box_2d = project_to_image(box_3d, calib) + return box_2d + + +if __name__ == '__main__': + calib = np.array( + [[7.070493000000e+02, 0.000000000000e+00, 6.040814000000e+02, 4.575831000000e+01], + [0.000000000000e+00, 7.070493000000e+02, 1.805066000000e+02, -3.454157000000e-01], + [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 4.981016000000e-03]], + dtype=np.float32) + alpha = -0.20 + tl = np.array([712.40, 143.00], dtype=np.float32) + br = np.array([810.73, 307.92], dtype=np.float32) + ct = (tl + br) / 2 + rotation_y = 0.01 + print('alpha2rot_y', alpha2rot_y(alpha, ct[0], calib[0, 2], calib[0, 0])) + print('rotation_y', rotation_y) diff --git a/pix2text/doc_xl_layout/utils/debugger.py b/pix2text/doc_xl_layout/utils/debugger.py new file mode 100644 index 0000000000000000000000000000000000000000..2930ae959cae1d58654ac4c9c36ed1b0e62358f4 --- /dev/null +++ b/pix2text/doc_xl_layout/utils/debugger.py @@ -0,0 +1,606 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import cv2 +import numpy as np + +from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d + +class Debugger(object): + def __init__(self, ipynb=False, theme='black', + num_classes=-1, dataset=None, down_ratio=4): + self.ipynb = ipynb + if not self.ipynb: + import matplotlib.pyplot as plt + self.plt = plt + self.imgs = {} + self.theme = theme + colors = [(color_list[_]).astype(np.uint8) \ + for _ in range(len(color_list))] + self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3) + if self.theme == 'white': + self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3) + 
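+# Added commentary (editor's assumption from the surrounding code): for the
+# 'white' theme the flat palette bytes are reversed end-to-end above and then
+# clamped to at most 0.6 * 255 = 153 in the line below, presumably so that
+# heatmap overlays keep enough contrast when blended over a light background.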
self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8) + self.dim_scale = 1 + if dataset == 'coco_hp': + self.names = ['p'] + self.num_class = 1 + self.num_joints = 17 + self.edges = [[0, 1], [0, 2], [1, 3], [2, 4], + [3, 5], [4, 6], [5, 6], + [5, 7], [7, 9], [6, 8], [8, 10], + [5, 11], [6, 12], [11, 12], + [11, 13], [13, 15], [12, 14], [14, 16]] + self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), + (255, 0, 0), (0, 0, 255), (255, 0, 255), + (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255), + (255, 0, 0), (0, 0, 255), (255, 0, 255), + (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)] + self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255), + (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), + (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), + (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), + (255, 0, 0), (0, 0, 255)] + elif num_classes == 80 or dataset == 'coco': + self.names = coco_class_name + elif num_classes == 20 or dataset == 'pascal': + self.names = pascal_class_name + elif num_classes == 1 and dataset == 'table': + self.names = table_class_name + elif num_classes == 16 or dataset == 'huntie': + self.names = huntie_class_name + elif dataset == 'vehicle': + self.names = vehicle_class_name + elif num_classes == 2 or dataset == 'video': + self.names = video_class_name + elif dataset == 'gta': + self.names = gta_class_name + self.focal_length = 935.3074360871937 + self.W = 1920 + self.H = 1080 + self.dim_scale = 3 + elif dataset == 'viper': + self.names = gta_class_name + self.focal_length = 1158 + self.W = 1920 + self.H = 1080 + self.dim_scale = 3 + elif num_classes == 3 or dataset == 'kitti': + self.names = kitti_class_name + self.focal_length = 721.5377 + self.W = 1242 + self.H = 375 + # num_classes = len(self.names) + self.down_ratio = down_ratio + # for bird view + self.world_size = 64 + self.out_size = 384 + + def add_img(self, img, img_id='default', revert_color=False): + if revert_color: + img = 255 - img + self.imgs[img_id] = img.copy() + + def add_mask(self, mask, bg, imgId='default', trans=0.8): + self.imgs[imgId] = (mask.reshape( + mask.shape[0], mask.shape[1], 1) * 255 * trans + \ + bg * (1 - trans)).astype(np.uint8) + + def show_img(self, pause=False, imgId='default'): + cv2.imshow('{}'.format(imgId), self.imgs[imgId]) + if pause: + cv2.waitKey() + + def add_blend_img(self, back, fore, img_id='blend', trans=0.7): + if self.theme == 'white': + fore = 255 - fore + if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]: + fore = cv2.resize(fore, (back.shape[1], back.shape[0])) + if len(fore.shape) == 2: + fore = fore.reshape(fore.shape[0], fore.shape[1], 1) + self.imgs[img_id] = (back * (1. 
- trans) + fore * trans) + self.imgs[img_id][self.imgs[img_id] > 255] = 255 + self.imgs[img_id][self.imgs[img_id] < 0] = 0 + self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy() + + ''' + # slow version + def gen_colormap(self, img, output_res=None): + # num_classes = len(self.colors) + img[img < 0] = 0 + h, w = img.shape[1], img.shape[2] + if output_res is None: + output_res = (h * self.down_ratio, w * self.down_ratio) + color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8) + for i in range(img.shape[0]): + resized = cv2.resize(img[i], (output_res[1], output_res[0])) + resized = resized.reshape(output_res[0], output_res[1], 1) + cl = self.colors[i] if not (self.theme == 'white') \ + else 255 - self.colors[i] + color_map = np.maximum(color_map, (resized * cl).astype(np.uint8)) + return color_map + ''' + + def gen_colormap(self, img, output_res=None): + img = img.copy() + c, h, w = img.shape[0], img.shape[1], img.shape[2] + if output_res is None: + output_res = (h * self.down_ratio, w * self.down_ratio) + img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32) + colors = np.array( + self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3) + if self.theme == 'white': + colors = 255 - colors + color_map = (img * colors).max(axis=2).astype(np.uint8) + color_map = cv2.resize(color_map, (output_res[0], output_res[1])) + return color_map + + ''' + # slow + def gen_colormap_hp(self, img, output_res=None): + # num_classes = len(self.colors) + # img[img < 0] = 0 + h, w = img.shape[1], img.shape[2] + if output_res is None: + output_res = (h * self.down_ratio, w * self.down_ratio) + color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8) + for i in range(img.shape[0]): + resized = cv2.resize(img[i], (output_res[1], output_res[0])) + resized = resized.reshape(output_res[0], output_res[1], 1) + cl = self.colors_hp[i] if not (self.theme == 'white') else \ + (255 - np.array(self.colors_hp[i])) + color_map = np.maximum(color_map, (resized * cl).astype(np.uint8)) + return color_map + ''' + + def gen_colormap_hp(self, img, output_res=None): + c, h, w = img.shape[0], img.shape[1], img.shape[2] + if output_res is None: + output_res = (h * self.down_ratio, w * self.down_ratio) + img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32) + colors = np.array( + self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3) + if self.theme == 'white': + colors = 255 - colors + color_map = (img * colors).max(axis=2).astype(np.uint8) + color_map = cv2.resize(color_map, (output_res[0], output_res[1])) + return color_map + + def add_rect(self, rect1, rect2, c, conf=1, img_id='default'): + cv2.rectangle( + self.imgs[img_id], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2) + if conf < 1: + cv2.circle(self.imgs[img_id], (rect1[0], rect1[1]), int(10 * conf), c, 1) + cv2.circle(self.imgs[img_id], (rect2[0], rect2[1]), int(10 * conf), c, 1) + cv2.circle(self.imgs[img_id], (rect1[0], rect2[1]), int(10 * conf), c, 1) + cv2.circle(self.imgs[img_id], (rect2[0], rect1[1]), int(10 * conf), c, 1) + + def add_coco_bbox(self, bbox, cat, conf=1, show_txt=False, img_id='default'): + bbox = np.array(bbox, dtype=np.int32) + # cat = (int(cat) + 1) % 80 + cat = int(cat) + # print('cat', cat, self.names[cat]) + c = self.colors[cat][0][0].tolist() + if self.theme == 'white': + c = (255 - np.array(c)).tolist() + # txt = '{}{:.1f}'.format(self.names[cat], conf) + txt = '{}{:.1f}'.format(cat, conf) + font = cv2.FONT_HERSHEY_SIMPLEX + cat_size = 
cv2.getTextSize(txt, font, 0.5, 2)[0] + cv2.rectangle( + self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 1) + if show_txt: + cv2.rectangle(self.imgs[img_id], + (bbox[0], bbox[1] - cat_size[1] - 2), + (bbox[0] + cat_size[0], bbox[1] - 2), c, -1) + cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2), + font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA) + + def add_4ps_coco_bbox(self, bbox, cat, conf=1, show_txt=False, img_id='default'): + bbox = np.array(bbox, dtype=np.int32) + # cat = (int(cat) + 1) % 80 + cat = int(cat) + c = self.colors[cat][0][0].tolist() + if self.theme == 'white': + c = (255 - np.array(c)).tolist() + txt = '{}_{:.1f}_{}_{}'.format(str(cat), conf, bbox[-2], bbox[-1]) + font = cv2.FONT_HERSHEY_SIMPLEX + cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0] + cv2.line(self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2) + cv2.line(self.imgs[img_id], (bbox[2], bbox[3]), (bbox[4], bbox[5]), (0, 255, 0), 2) + cv2.line(self.imgs[img_id], (bbox[4], bbox[5]), (bbox[6], bbox[7]), (255, 0, 0), 2) + cv2.line(self.imgs[img_id], (bbox[6], bbox[7]), (bbox[0], bbox[1]), (0, 255, 255), 2) + # cv2.rectangle( + # self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0,0,255), 1) + if show_txt: + # cv2.rectangle(self.imgs[img_id], + # (bbox[0], bbox[1] - cat_size[1] - 2), + # (bbox[0] + cat_size[0], bbox[1] - 2), c, -1) + cv2.putText(self.imgs[img_id], txt, (int((bbox[0] + bbox[6]) / 2), int((bbox[1] + bbox[7]) / 2)), + font, 1, (0, 0, 255), thickness=1, lineType=cv2.LINE_AA) + + def add_coco_hp(self, points, img_id='default'): + points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2) + for j in range(self.num_joints): + cv2.circle(self.imgs[img_id], + (points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1) + for j, e in enumerate(self.edges): + if points[e].min() > 0: + cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]), + (points[e[1], 0], points[e[1], 1]), self.ec[j], 2, + lineType=cv2.LINE_AA) + + def add_points(self, points, img_id='default'): + num_classes = len(points) + # assert num_classes == len(self.colors) + for i in range(num_classes): + for j in range(len(points[i])): + c = self.colors[i, 0, 0] + cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio, + points[i][j][1] * self.down_ratio), + 5, (255, 255, 255), -1) + cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio, + points[i][j][1] * self.down_ratio), + 3, (int(c[0]), int(c[1]), int(c[2])), -1) + + def add_corner(self, corner, img_id='default'): + font = cv2.FONT_HERSHEY_SIMPLEX + cls = int(corner[2]) + if cls == 0: + rgb = (0, 0, 255) + if cls == 1: + rgb = (0, 255, 0) + if cls == 2: + rgb = (255, 0, 0) + if cls == 3: + rgb = (0, 0, 0) + cv2.circle(self.imgs[img_id], (int(corner[0]), int(corner[1])), 3, (255, 0, 0), 2) + cv2.putText(self.imgs[img_id], str(cls), (int(corner[0]) - 5, int(corner[1]) - 5), font, 0.5, rgb, thickness=1, + lineType=cv2.LINE_AA) + + def show_all_imgs(self, pause=False, time=0): + if not self.ipynb: + for i, v in self.imgs.items(): + cv2.imshow('{}'.format(i), v) + if cv2.waitKey(0 if pause else 1) == 27: + import sys + sys.exit(0) + else: + self.ax = None + nImgs = len(self.imgs) + fig = self.plt.figure(figsize=(nImgs * 10, 10)) + nCols = nImgs + nRows = nImgs // nCols + for i, (k, v) in enumerate(self.imgs.items()): + fig.add_subplot(1, nImgs, i + 1) + if len(v.shape) == 3: + self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB)) + else: + self.plt.imshow(v) + self.plt.show() + + def 
save_img(self, imgId='default', path='./cache/debug/'): + cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId]) + + def save_all_imgs(self, image_name, path='./cache/debug/', prefix='', genID=False): + if genID: + try: + idx = int(np.loadtxt(path + '/id.txt')) + except: + idx = 0 + prefix = idx + np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d') + for i, v in self.imgs.items(): + # pdb.set_trace() + # cv2.imwrite(path + '/{}{}.png'.format(prefix,i), v) + cv2.imwrite(path + '/%s' % image_name, v) + # print(path+'/%s'%image_name) + + def remove_side(self, img_id, img): + if not (img_id in self.imgs): + return + ws = img.sum(axis=2).sum(axis=0) + l = 0 + while ws[l] == 0 and l < len(ws): + l += 1 + r = ws.shape[0] - 1 + while ws[r] == 0 and r > 0: + r -= 1 + hs = img.sum(axis=2).sum(axis=1) + t = 0 + while hs[t] == 0 and t < len(hs): + t += 1 + b = hs.shape[0] - 1 + while hs[b] == 0 and b > 0: + b -= 1 + self.imgs[img_id] = self.imgs[img_id][t:b + 1, l:r + 1].copy() + + def project_3d_to_bird(self, pt): + pt[0] += self.world_size / 2 + pt[1] = self.world_size - pt[1] + pt = pt * self.out_size / self.world_size + return pt.astype(np.int32) + + def add_ct_detection( + self, img, dets, show_box=False, show_txt=True, + center_thresh=0.5, img_id='det'): + # dets: max_preds x 5 + self.imgs[img_id] = img.copy() + if type(dets) == type({}): + for cat in dets: + for i in range(len(dets[cat])): + if dets[cat][i, 2] > center_thresh: + cl = (self.colors[cat, 0, 0]).tolist() + ct = dets[cat][i, :2].astype(np.int32) + if show_box: + w, h = dets[cat][i, -2], dets[cat][i, -1] + x, y = dets[cat][i, 0], dets[cat][i, 1] + bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2], + dtype=np.float32) + self.add_coco_bbox( + bbox, cat - 1, dets[cat][i, 2], + show_txt=show_txt, img_id=img_id) + else: + for i in range(len(dets)): + if dets[i, 2] > center_thresh: + # print('dets', dets[i]) + cat = int(dets[i, -1]) + cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \ + 255 - self.colors[cat, 0, 0]).tolist() + ct = dets[i, :2].astype(np.int32) * self.down_ratio + cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1) + if show_box: + w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio + x, y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio + bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2], + dtype=np.float32) + self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id) + + def add_3d_detection( + self, image_or_path, dets, calib, show_txt=False, + center_thresh=0.5, img_id='det'): + if isinstance(image_or_path, np.ndarray): + self.imgs[img_id] = image_or_path + else: + self.imgs[img_id] = cv2.imread(image_or_path) + for cat in dets: + for i in range(len(dets[cat])): + cl = (self.colors[cat - 1, 0, 0]).tolist() + if dets[cat][i, -1] > center_thresh: + dim = dets[cat][i, 5:8] + loc = dets[cat][i, 8:11] + rot_y = dets[cat][i, 11] + # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale + # dim = dim / self.dim_scale + if loc[2] > 1: + box_3d = compute_box_3d(dim, loc, rot_y) + box_2d = project_to_image(box_3d, calib) + self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl) + + def compose_vis_add( + self, img_path, dets, calib, + center_thresh, pred, bev, img_id='out'): + self.imgs[img_id] = cv2.imread(img_path) + # h, w = self.imgs[img_id].shape[:2] + # pred = cv2.resize(pred, (h, w)) + h, w = pred.shape[:2] + hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w + self.imgs[img_id] = 
cv2.resize(self.imgs[img_id], (w, h)) + self.add_blend_img(self.imgs[img_id], pred, img_id) + for cat in dets: + for i in range(len(dets[cat])): + cl = (self.colors[cat - 1, 0, 0]).tolist() + if dets[cat][i, -1] > center_thresh: + dim = dets[cat][i, 5:8] + loc = dets[cat][i, 8:11] + rot_y = dets[cat][i, 11] + # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale + # dim = dim / self.dim_scale + if loc[2] > 1: + box_3d = compute_box_3d(dim, loc, rot_y) + box_2d = project_to_image(box_3d, calib) + box_2d[:, 0] /= hs + box_2d[:, 1] /= ws + self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl) + self.imgs[img_id] = np.concatenate( + [self.imgs[img_id], self.imgs[bev]], axis=1) + + def add_2d_detection( + self, img, dets, show_box=False, show_txt=True, + center_thresh=0.5, img_id='det'): + self.imgs[img_id] = img + for cat in dets: + for i in range(len(dets[cat])): + cl = (self.colors[cat - 1, 0, 0]).tolist() + if dets[cat][i, -1] > center_thresh: + bbox = dets[cat][i, 1:5] + self.add_coco_bbox( + bbox, cat - 1, dets[cat][i, -1], + show_txt=show_txt, img_id=img_id) + + def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'): + bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230 + for cat in dets: + cl = (self.colors[cat - 1, 0, 0]).tolist() + lc = (250, 152, 12) + for i in range(len(dets[cat])): + if dets[cat][i, -1] > center_thresh: + dim = dets[cat][i, 5:8] + loc = dets[cat][i, 8:11] + rot_y = dets[cat][i, 11] + rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]] + for k in range(4): + rect[k] = self.project_3d_to_bird(rect[k]) + # cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1) + cv2.polylines( + bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)], + True, lc, 2, lineType=cv2.LINE_AA) + for e in [[0, 1]]: + t = 4 if e == [0, 1] else 1 + cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]), + (rect[e[1]][0], rect[e[1]][1]), lc, t, + lineType=cv2.LINE_AA) + self.imgs[img_id] = bird_view + + def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'): + alpha = 0.5 + bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230 + for ii, (dets, lc, cc) in enumerate( + [(dets_gt, (12, 49, 250), (0, 0, 255)), + (dets_dt, (250, 152, 12), (255, 0, 0))]): + # cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3) + for cat in dets: + cl = (self.colors[cat - 1, 0, 0]).tolist() + for i in range(len(dets[cat])): + if dets[cat][i, -1] > center_thresh: + dim = dets[cat][i, 5:8] + loc = dets[cat][i, 8:11] + rot_y = dets[cat][i, 11] + rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]] + for k in range(4): + rect[k] = self.project_3d_to_bird(rect[k]) + if ii == 0: + cv2.fillPoly( + bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)], + lc, lineType=cv2.LINE_AA) + else: + cv2.polylines( + bird_view, [rect.reshape(-1, 1, 2).astype(np.int32)], + True, lc, 2, lineType=cv2.LINE_AA) + # for e in [[0, 1], [1, 2], [2, 3], [3, 0]]: + for e in [[0, 1]]: + t = 4 if e == [0, 1] else 1 + cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]), + (rect[e[1]][0], rect[e[1]][1]), lc, t, + lineType=cv2.LINE_AA) + self.imgs[img_id] = bird_view + + +kitti_class_name = [ + 'p', 'v', 'b' +] + +gta_class_name = [ + 'p', 'v' +] + +pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", + "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] + +coco_class_name = [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', + 'bus', 'train', 
'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', + 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', + 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', + 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', + 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', + 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', + 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', + 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy bear', 'hair drier', 'toothbrush' +] + +table_class_name = ["box"] + +huntie_class_name = ['hcp', 'fjxcd', 'czcfp', 'defp', 'zzsfp', + 'qtfp', 'sfz_front', 'sfz_back', 'xsz_first', 'xsz_second', + 'bank_card', 'jsz_first', 'roll_ticket', 'czr', 'huzhu', 'FedEx', + 'birth_certification', 'blicence', 'car_invoice', 'estate', 'food_blicence', + 'food_plicence', "jsz_first", "passport_china", "permit_china", + "permit_china_miner", "house_cert", "book_blicense", "medical_license", + "medical_instrument_license"] + +video_class_name = ['phone_contract', 'phone_signature'] + +vehicle_class_name = ["first", "second"] + +color_list = np.array( + [ + 1.000, 1.000, 1.000, + 0.850, 0.325, 0.098, + 0.929, 0.694, 0.125, + 0.494, 0.184, 0.556, + 0.466, 0.674, 0.188, + 0.301, 0.745, 0.933, + 0.635, 0.078, 0.184, + 0.300, 0.300, 0.300, + 0.600, 0.600, 0.600, + 1.000, 0.000, 0.000, + 1.000, 0.500, 0.000, + 0.749, 0.749, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 1.000, + 0.667, 0.000, 1.000, + 0.333, 0.333, 0.000, + 0.333, 0.667, 0.000, + 0.333, 1.000, 0.000, + 0.667, 0.333, 0.000, + 0.667, 0.667, 0.000, + 0.667, 1.000, 0.000, + 1.000, 0.333, 0.000, + 1.000, 0.667, 0.000, + 1.000, 1.000, 0.000, + 0.000, 0.333, 0.500, + 0.000, 0.667, 0.500, + 0.000, 1.000, 0.500, + 0.333, 0.000, 0.500, + 0.333, 0.333, 0.500, + 0.333, 0.667, 0.500, + 0.333, 1.000, 0.500, + 0.667, 0.000, 0.500, + 0.667, 0.333, 0.500, + 0.667, 0.667, 0.500, + 0.667, 1.000, 0.500, + 1.000, 0.000, 0.500, + 1.000, 0.333, 0.500, + 1.000, 0.667, 0.500, + 1.000, 1.000, 0.500, + 0.000, 0.333, 1.000, + 0.000, 0.667, 1.000, + 0.000, 1.000, 1.000, + 0.333, 0.000, 1.000, + 0.333, 0.333, 1.000, + 0.333, 0.667, 1.000, + 0.333, 1.000, 1.000, + 0.667, 0.000, 1.000, + 0.667, 0.333, 1.000, + 0.667, 0.667, 1.000, + 0.667, 1.000, 1.000, + 1.000, 0.000, 1.000, + 1.000, 0.333, 1.000, + 1.000, 0.667, 1.000, + 0.167, 0.000, 0.000, + 0.333, 0.000, 0.000, + 0.500, 0.000, 0.000, + 0.667, 0.000, 0.000, + 0.833, 0.000, 0.000, + 1.000, 0.000, 0.000, + 0.000, 0.167, 0.000, + 0.000, 0.333, 0.000, + 0.000, 0.500, 0.000, + 0.000, 0.667, 0.000, + 0.000, 0.833, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 0.167, + 0.000, 0.000, 0.333, + 0.000, 0.000, 0.500, + 0.000, 0.000, 0.667, + 0.000, 0.000, 0.833, + 0.000, 0.000, 1.000, + 0.000, 0.000, 0.000, + 0.143, 0.143, 0.143, + 0.286, 0.286, 0.286, + 0.429, 0.429, 0.429, + 0.571, 0.571, 0.571, + 0.714, 0.714, 0.714, + 0.857, 0.857, 0.857, + 0.000, 0.447, 0.741, + 0.50, 0.5, 0 + ] +).astype(np.float32) +color_list = color_list.reshape((-1, 3)) * 255 diff --git a/pix2text/doc_xl_layout/utils/evaluation_bk.py b/pix2text/doc_xl_layout/utils/evaluation_bk.py new file mode 100644 index 0000000000000000000000000000000000000000..3f9316ec0529cbae0874a6a63864d396289943a8 --- 
/dev/null
+++ b/pix2text/doc_xl_layout/utils/evaluation_bk.py
@@ -0,0 +1,437 @@
+import json
+import os
+import sys
+
+import cv2
+import numpy as np
+from shapely.geometry import Polygon
+from tabulate import tabulate
+import time
+
+
+def visual_badcase(image_name, pred_list, label_list, output_dir="visual_badcase", info=None, prefix=''):
+    """Draw ground-truth boxes (green) and predicted boxes (blue) on the image and save it to `output_dir`."""
+    image_name = image_name + '.jpg'
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    image_dir = os.path.abspath('../../data/huntie/test_images/')
+    image_path = os.path.join(image_dir, image_name)
+    img = cv2.imread(image_path)
+    if img is None:
+        print("--> Warning: skip, image NOT found: {}".format(image_path))
+        return None
+
+    font = cv2.FONT_HERSHEY_SIMPLEX
+    for label in label_list:
+        points, class_id = label[:8], label[8]
+        pts = np.array(points).reshape((1, -1, 2)).astype(np.int32)
+        cv2.polylines(img, pts, isClosed=True, color=(0, 255, 0), thickness=3)
+        cv2.putText(img, "gt:" + str(class_id), tuple(pts[0][0].tolist()), font, 1, (0, 255, 0), 2)
+
+    for label in pred_list:
+        points, class_id = label[:8], label[8]
+        pts = np.array(points).reshape((1, -1, 2)).astype(np.int32)
+        cv2.polylines(img, pts, isClosed=True, color=(255, 0, 0), thickness=3)
+        cv2.putText(img, "pred:" + str(class_id), tuple(pts[0][-1].tolist()), font, 1, (255, 0, 0), 2)
+
+    if info is not None:
+        cv2.putText(img, str(info), (40, 40), font, 1, (0, 0, 255), 2)
+    output_path = os.path.join(output_dir, prefix + os.path.basename(image_path))
+    print("--> info: visualizing badcase: {}".format(output_path))
+    cv2.imwrite(output_path, img)
+
+
+def load_gt_from_json(json_path):
+    """Load COCO-style annotations and group them first by `group_name`, then by image."""
+    with open(json_path) as f:
+        gt_info = json.load(f)
+    gt_image_list = gt_info["images"]
+    gt_anno_list = gt_info["annotations"]
+
+    id_to_image_info = {}
+    for image_item in gt_image_list:
+        id_to_image_info[image_item['id']] = {
+            "file_name": image_item['file_name'],
+            "group_name": image_item.get("group_name", "huntie")
+        }
+
+    group_info = {}
+    for annotation_item in gt_anno_list:
+        image_info = id_to_image_info[annotation_item['image_id']]
+        image_name, group_name = image_info["file_name"], image_info["group_name"]
+
+        if group_name not in group_info:
+            group_info[group_name] = {}
+        if image_name not in group_info[group_name]:
+            group_info[group_name][image_name] = []
+        anno_info = {
+            "category_id": annotation_item["category_id"],
+            "poly": annotation_item["poly"],
+            "secondary_id": annotation_item.get("secondary_id", -1),
+            "direction_id": annotation_item.get("direction_id", -1)
+        }
+        group_info[group_name][image_name].append(anno_info)
+
+    group_info_str = ", ".join(["{}[{}]".format(k, len(v)) for k, v in group_info.items()])
+    print("--> load {} groups: {}".format(len(group_info.keys()), group_info_str))
+    return group_info
+
+
+def save_res_to_file(table_head, table_body_sorted):
+    with open('val_out.txt', 'a') as fout:
+        fout.write(time.strftime('%Y-%m-%d-%H-%M') + '\n')
+        fout.write('\t'.join(table_head) + '\n')
+        for line in table_body_sorted:
+            new_line = []
+            for ele in line:
+                if isinstance(ele, int):
+                    new_line.append('{:d}'.format(ele))
+                elif isinstance(ele, float):
+                    new_line.append('{:.6f}'.format(ele))
+                elif isinstance(ele, str):
+                    new_line.append(ele)
+            fout.write('\t'.join(new_line) + '\n')
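For reference, `load_gt_from_json` returns a nested dict keyed by group and then by image name; a sketch of the shape, with hypothetical values:

# {
#     "huntie": {
#         "img_0001.jpg": [
#             {"category_id": 3, "poly": [x1, y1, x2, y2, x3, y3, x4, y4],
#              "secondary_id": -1, "direction_id": -1},
#             ...
#         ],
#     },
# }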
+def calc_iou(label, detect):
+    label_box = []
+    detect_box = []
+
+    d_area = []
+    for i in range(0, len(detect)):
+        pred_poly = detect[i]["poly"]
+        box_det = []
+        for k in range(0, 4):
+            box_det.append([pred_poly[2 * k], pred_poly[2 * k + 1]])
+        detect_box.append(box_det)
+        try:
+            poly = Polygon(box_det)
+            d_area.append(poly.area)
+        except Exception:
+            print('invalid detection', pred_poly)
+            exit(-1)
+
+    l_area = []
+    for i in range(0, len(label)):
+        gt_poly = label[i]["poly"]
+        box_gt = []
+        for k in range(4):
+            box_gt.append([gt_poly[2 * k], gt_poly[2 * k + 1]])
+        label_box.append(box_gt)
+        try:
+            poly = Polygon(box_gt)
+            l_area.append(poly.area)
+        except Exception:
+            print('invalid label', gt_poly)
+            exit(-1)
+
+    ol_areas = []
+    for i in range(0, len(detect_box)):
+        ol_areas.append([])
+        poly1 = Polygon(detect_box[i])
+        for j in range(0, len(label_box)):
+            poly2 = Polygon(label_box[j])
+            try:
+                ol_area = poly2.intersection(poly1).area
+            except Exception:
+                print('invalid pair', detect_box[i], label_box[j])
+                ol_areas[i].append(0.0)
+            else:
+                ol_areas[i].append(ol_area)
+
+    d_ious = [0.0] * len(detect_box)
+    l_ious = [0.0] * len(label_box)
+    det2label_idx = [-1] * len(detect_box)  # index of the label box with the highest IoU for each detected box
+    for i in range(0, len(detect_box)):
+        for j in range(0, len(label_box)):
+            if int(label[j]["category_id"]) == int(detect[i]["category_id"]):
+                # iou = min(ol_areas[i][j] / (d_area[i] + 1e-10), ol_areas[i][j] / (l_area[j] + 1e-10))
+                iou = ol_areas[i][j] / (d_area[i] + l_area[j] - ol_areas[i][j] + 1e-10)
+            else:
+                iou = 0
+            det2label_idx[i] = j if iou > d_ious[i] else det2label_idx[i]
+            d_ious[i] = max(d_ious[i], iou)
+            l_ious[j] = max(l_ious[j], iou)
+    return l_ious, d_ious, det2label_idx
+
+
+def eval(instance_info):
+    img_name, label_info = instance_info
+    label = label_info['gt']
+    detect = label_info['det']
+    l_ious, d_ious, det2label_idx = calc_iou(label, detect)
+    return [img_name, d_ious, l_ious, detect, label, det2label_idx]
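`calc_iou` only matches boxes of the same category and records, for every detection, the index of its best-overlapping label box. A minimal sanity check with hypothetical boxes, using the 8-value poly format produced by `load_gt_from_json`:

label = [{"poly": [0, 0, 10, 0, 10, 10, 0, 10], "category_id": 1}]
detect = [
    {"poly": [0, 0, 10, 0, 10, 10, 0, 10], "category_id": 1},    # perfect hit
    {"poly": [20, 0, 30, 0, 30, 10, 20, 10], "category_id": 1},  # no overlap
]
l_ious, d_ious, det2label_idx = calc_iou(label, detect)
# d_ious ~ [1.0, 0.0], l_ious ~ [1.0], det2label_idx == [0, -1]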
+def static_with_class(rets, iou_thresh=0.7, is_verbose=True, map_info=None):
+    if is_verbose:
+        table_head = ['Class_id', 'Class_name', 'Pre_hit', 'Pre_num', 'GT_hit', 'GT_num', 'Precision', 'Recall', 'F-score', 'All_recalled', 'Img_num', 'Acc.']
+    else:
+        table_head = ['Class_id', 'Class_name', 'Precision', 'Recall', 'F-score']
+    table_body = []
+    class_dict = {}
+    all_dict = {}  # accumulates the overall totals across all categories
+    all_dict['dm'] = 0
+    all_dict['dv'] = 0
+    all_dict['lm'] = 0
+    all_dict['lv'] = 0
+    all_dict['Img_num'] = 0
+    all_dict['All_recalled'] = 0
+
+    no_need_keys = ['group_name', 'poly', 'score', 'category_id']
+    extra_keys = [_ for _ in rets[0][4][0].keys() if _ not in no_need_keys]
+    extra_table_heads = {}
+    extra_dict = {}
+    extra_table_body = {}
+    for key in extra_keys:
+        extra_table_heads[key] = [key, 'Name', 'Pre_hit', 'Pre_num', 'GT_hit', 'GT_num', 'Precision', 'Recall', 'F-score']
+        extra_dict[key] = {}
+        extra_table_body[key] = []
+
+    for i in range(len(rets)):
+        img_name, d_ious, l_ious, detects, labels, det2label_idx = rets[i]
+        item_lv, item_dv, item_dm, item_lm = 0, 0, 0, 0
+        current_dict = {}
+
+        for label in labels:
+            item_lv += 1
+            category_id = label["category_id"]
+            if category_id not in class_dict:
+                class_dict[category_id] = {}
+                class_dict[category_id]['dm'] = 0
+                class_dict[category_id]['dv'] = 0
+                class_dict[category_id]['lm'] = 0
+                class_dict[category_id]['lv'] = 0
+                class_dict[category_id]['Img_num'] = 0
+                class_dict[category_id]['All_recalled'] = 0
+            class_dict[category_id]['lv'] += 1
+
+        category_container = []
+        for label in labels:
+            if label['category_id'] not in category_container:
+                category_container.append(label['category_id'])
+        for category_id in category_container:
+            class_dict[category_id]['Img_num'] += 1
+            current_dict[category_id] = {'dm': 0, 'dv': 0, 'lm': 0, 'lv': 0, 'Img_num': 0, 'All_recalled': 0}
+        # register the value set of every extra key, and count its occurrences in labels and detections
+        for key in extra_keys:
+            for label in labels:
+                if label[key] not in extra_dict[key] and label[key] != -1:
+                    extra_dict[key][label[key]] = {'dm': 0, 'dv': 0, 'lm': 0, 'lv': 0}
+            for det in detects:
+                if det[key] not in extra_dict[key] and det[key] != -1:
+                    extra_dict[key][det[key]] = {'dm': 0, 'dv': 0, 'lm': 0, 'lv': 0}
+            for label in labels:
+                if label[key] != -1:
+                    extra_dict[key][label[key]]['lv'] += 1
+            for det in detects:
+                if det[key] != -1:
+                    extra_dict[key][det[key]]['dv'] += 1
+
+        for label in labels:
+            current_dict[label['category_id']]['lv'] += 1
+        for det in detects:
+            # count by the detection's own category; skip categories absent from the ground truth
+            if det['category_id'] in current_dict:
+                current_dict[det['category_id']]['dv'] += 1
+
+        for det in detects:
+            item_dv += 1
+            category_id = det["category_id"]
+            if category_id not in class_dict:
+                print("--> category_id does not exist in gt: {}".format(category_id))
+                continue
+            class_dict[category_id]['dv'] += 1
+
+        for idx, iou in enumerate(d_ious):
+            if iou >= iou_thresh:
+                item_dm += 1
+                class_dict[detects[idx]["category_id"]]['dm'] += 1
+                current_dict[detects[idx]["category_id"]]['dm'] += 1
+
+                for key in extra_keys:
+                    if labels[det2label_idx[idx]][key] != -1 and detects[idx][key] == labels[det2label_idx[idx]][key]:
+                        extra_dict[key][detects[idx][key]]['dm'] += 1
+                        extra_dict[key][detects[idx][key]]['lm'] += 1
+
+        for idx, iou in enumerate(l_ious):
+            if iou >= iou_thresh:
+                item_lm += 1
+                class_dict[labels[idx]["category_id"]]['lm'] += 1
+                current_dict[labels[idx]["category_id"]]['lm'] += 1
+
+        # append this image's recall to its result entry
+        item_r = item_lm / (item_lv + 1e-6)
+        rets[i].append(item_r)
+
+        # per-category flag: all GT boxes of this category in this image were recalled
+        for category_id in category_container:
+            id_recall = current_dict[category_id]['lm'] / (current_dict[category_id]['lv'] + 1e-6)
+            if (1 - id_recall) < 1e-5:
+                class_dict[category_id]['All_recalled'] += 1
+
+        # accumulate the all-category totals for this image
+        all_dict['dv'] += item_dv
+        all_dict['lv'] += item_lv
+        all_dict['dm'] += item_dm
+        all_dict['lm'] += item_lm
+        all_dict['Img_num'] += 1
+        if (1 - item_r) < 1e-5:
+            all_dict['All_recalled'] += 1
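Each per-class table row below is derived from four counters: `dm`/`dv` (matched vs. total detections) give precision, `lm`/`lv` (matched vs. total labels) give recall, and the `1e-6` terms only guard against empty classes. A worked example with hypothetical counts:

dm, dv, lm, lv = 8, 10, 8, 9    # 8 of 10 detections hit, 8 of 9 GT boxes recalled
p = dm / (dv + 1e-6)            # precision ~ 0.800
r = lm / (lv + 1e-6)            # recall    ~ 0.889
f = 2 * p * r / (p + r + 1e-6)  # F-score   ~ 0.842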
+    dm, dv, lm, lv, total, recalled = 0, 0, 0, 0, 0, 0
+    map_info = {} if map_info is None else map_info
+    for key in class_dict.keys():
+        dm += class_dict[key]['dm']
+        dv += class_dict[key]['dv']
+        lm += class_dict[key]['lm']
+        lv += class_dict[key]['lv']
+        recalled += class_dict[key]['All_recalled']
+        total += class_dict[key]['Img_num']
+        p = class_dict[key]['dm'] / (class_dict[key]['dv'] + 1e-6)
+        r = class_dict[key]['lm'] / (class_dict[key]['lv'] + 1e-6)
+        fscore = 2 * p * r / (p + r + 1e-6)
+        acc = class_dict[key]['All_recalled'] / (class_dict[key]['Img_num'] + 1e-6)
+        if is_verbose:
+            table_body.append((key, map_info.get("primary_map", {}).get(str(key), str(key)), class_dict[key]['dm'],
+                               class_dict[key]['dv'], class_dict[key]['lm'], class_dict[key]['lv'], p, r, fscore,
+                               class_dict[key]['All_recalled'], class_dict[key]['Img_num'], acc))
+        else:
+            table_body.append((key, map_info.get(str(key), str(key)), p, r, fscore))
+
+    p = dm / (dv + 1e-6)
+    r = lm / (lv + 1e-6)
+    f = 2 * p * r / (p + r + 1e-6)
+    acc = recalled / (total + 1e-6)
+    table_body_sorted = sorted(table_body, key=lambda x: int(x[0]))
+    if is_verbose:
+        table_body_sorted.append(('IOU_{}'.format(iou_thresh), 'average', dm, dv, lm, lv, p, r, f,
+                                  all_dict['All_recalled'], all_dict['Img_num'],
+                                  all_dict['All_recalled'] / (all_dict['Img_num'] + 1e-6)))
+    else:
+        table_body_sorted.append(('IOU_{}'.format(iou_thresh), 'average', p, r, f))
+    save_res_to_file(table_head, table_body_sorted)
+    print(tabulate(table_body_sorted, headers=table_head, tablefmt='pipe'))
+
+    # per-value tables for the extra keys (e.g. secondary_id, direction_id)
+    for _key in extra_dict.keys():
+        dm, dv, lm, lv = 0, 0, 0, 0
+        for key in extra_dict[_key].keys():
+            dm += extra_dict[_key][key]['dm']
+            dv += extra_dict[_key][key]['dv']
+            lm += extra_dict[_key][key]['lm']
+            lv += extra_dict[_key][key]['lv']
+            # find the map_info entry whose name corresponds to the current extra key
+            map_name = ''
+            for candidate_name in map_info.keys():
+                if candidate_name.split('_')[0] == _key.split('_')[0]:
+                    map_name = candidate_name
+
+            precision = extra_dict[_key][key]['dm'] / (extra_dict[_key][key]['dv'] + 1e-6)
+            recall = extra_dict[_key][key]['lm'] / (extra_dict[_key][key]['lv'] + 1e-6)
+            fscore = 2 * precision * recall / (precision + recall + 1e-6)
+            if map_name == '':  # no matching name table found in map_info
+                extra_table_body[_key].append((key, '', extra_dict[_key][key]['dm'], extra_dict[_key][key]['dv'],
+                                               extra_dict[_key][key]['lm'], extra_dict[_key][key]['lv'],
+                                               precision, recall, fscore))
+            else:
+                extra_table_body[_key].append((key, map_info.get(map_name, {}).get(str(key), str(key)),
+                                               extra_dict[_key][key]['dm'], extra_dict[_key][key]['dv'],
+                                               extra_dict[_key][key]['lm'], extra_dict[_key][key]['lv'],
+                                               precision, recall, fscore))
+        extra_table_body[_key] = sorted(extra_table_body[_key], key=lambda x: int(x[0]))
+        p = dm / (dv + 1e-6)
+        r = lm / (lv + 1e-6)
+        f = 2 * p * r / (p + r + 1e-6)
+        extra_table_body[_key].append(('-', 'average', dm, dv, lm, lv, p, r, f))
+    for _key in extra_keys:
+        save_res_to_file(extra_table_heads[_key], extra_table_body[_key])
+        print(tabulate(extra_table_body[_key], headers=extra_table_heads[_key], tablefmt='pipe'))
+
+    return [table_head] + table_body_sorted
+
+
+def multiproc(func, task_list, proc_num=30, retv=True, progress_bar=False):
+    from multiprocessing import Pool
+    pool = Pool(proc_num)
+
+    rets = []
+    if progress_bar:
+        import tqdm
+        with tqdm.tqdm(total=len(task_list)) as t:
+            for ret in pool.imap(func, task_list):
+                rets.append(ret)
+                t.update(1)
+    else:
+        for ret in pool.imap(func, task_list):
+            rets.append(ret)
+
+    pool.close()
+    pool.join()
+
+    if retv:
+        return rets
+
+
+def eval_and_show(label_dict, detect_dict, output_dir, iou_thresh=0.7, map_info=None):
+    """Pair predictions with labels group by group, then compute and print the metric tables."""
+    evaluation_group_info = {}
+    for group_name, gt_info in label_dict.items():
+        group_pair_list = []
+        for file_name, value_list in gt_info.items():
+            if file_name not in detect_dict:
+                # print("--> missing pred:", file_name)
+                continue
+            group_pair_list.append([file_name, {'gt': gt_info[file_name], 'det': detect_dict[file_name]}])
+        evaluation_group_info[group_name]
= group_pair_list + + res_info_all = {} + for group_name, group_pair_list in evaluation_group_info.items(): + print(" ------- group name: {} -----------".format(group_name)) + rets = multiproc(eval, group_pair_list, proc_num=16) + # import pdb; pdb.set_trace() + group_name_map_info = map_info.get(group_name, None) if map_info is not None else None + res_info = static_with_class(rets, iou_thresh=iou_thresh, map_info=group_name_map_info) + res_info_all[group_name] = res_info + + evaluation_res_info_path = os.path.join(output_dir, "results_val.json") + with open(evaluation_res_info_path, "w") as f: + json.dump(res_info_all, f, ensure_ascii=False, indent=4) + print("--> info: evaluation result is saved at {}".format(evaluation_res_info_path)) + return rets + +if __name__ == "__main__": + + if len(sys.argv) != 5: + print("Usage: python {} gt_json_path pred_json_path output_dir iou_thresh".format(__file__)) + exit(-1) + else: + print('--> info: {}'.format(sys.argv)) + gt_json_path, pred_json_path, output_dir, iou_thresh = sys.argv[1], sys.argv[2], sys.argv[3], float(sys.argv[4]) + + label_dict = load_gt_from_json(gt_json_path) + with open(pred_json_path, "r") as f: + detect_dict = json.load(f) + res_info = eval_and_show(label_dict, detect_dict, output_dir, iou_thresh=iou_thresh, map_info=None) + diff --git a/pix2text/doc_xl_layout/utils/image.py b/pix2text/doc_xl_layout/utils/image.py new file mode 100644 index 0000000000000000000000000000000000000000..0ea882def710899d7f4b5eb0ea4a8f5409d6bc43 --- /dev/null +++ b/pix2text/doc_xl_layout/utils/image.py @@ -0,0 +1,242 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (Bin.Xiao@microsoft.com) +# Modified by Xingyi Zhou +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random + +import cv2 +import numpy as np + + +def flip(img): + return img[:, :, ::-1].copy() + + +def transform_preds(coords, center, scale, output_size): + target_coords = np.zeros(coords.shape) + trans = get_affine_transform(center, scale, 0, output_size, inv=1) + for p in range(coords.shape[0]): + target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) + return target_coords + + +def get_affine_transform(center, + scale, + rot, + output_size, + shift=np.array([0, 0], dtype=np.float32), + inv=0): + if not isinstance(scale, np.ndarray) and not isinstance(scale, list): + scale = np.array([scale, scale], dtype=np.float32) + + scale_tmp = scale + src_w = scale_tmp[0] + dst_w = output_size[0] + dst_h = output_size[1] + + rot_rad = np.pi * rot / 180 + src_dir = get_dir([0, src_w * -0.5], rot_rad) + dst_dir = np.array([0, dst_w * -0.5], np.float32) + + src = np.zeros((3, 2), dtype=np.float32) + dst = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale_tmp * shift + src[1, :] = center + src_dir + scale_tmp * shift + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir + + src[2:, :] = get_3rd_point(src[0, :], src[1, :]) + dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return trans + + +def affine_transform(pt, t): + new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T + new_pt = 
np.dot(t, new_pt) + return new_pt[:2] + + +def get_3rd_point(a, b): + direct = a - b + return b + np.array([-direct[1], direct[0]], dtype=np.float32) + + +def get_dir(src_point, rot_rad): + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + + src_result = [0, 0] + src_result[0] = src_point[0] * cs - src_point[1] * sn + src_result[1] = src_point[0] * sn + src_point[1] * cs + + return src_result + + +def crop(img, center, scale, output_size, rot=0): + trans = get_affine_transform(center, scale, rot, output_size) + + dst_img = cv2.warpAffine(img, + trans, + (int(output_size[0]), int(output_size[1])), + flags=cv2.INTER_LINEAR) + + return dst_img + + +def gaussian_radius(det_size, min_overlap=0.7): + height, width = det_size + + a1 = 1 + b1 = (height + width) + c1 = width * height * (1 - min_overlap) / (1 + min_overlap) + sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1) + r1 = (b1 + sq1) / 2 + + a2 = 4 + b2 = 2 * (height + width) + c2 = (1 - min_overlap) * width * height + sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2) + r2 = (b2 + sq2) / 2 + + a3 = 4 * min_overlap + b3 = -2 * min_overlap * (height + width) + c3 = (min_overlap - 1) * width * height + sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3) + r3 = (b3 + sq3) / 2 + return min(r1, r2, r3) + + +def gaussian2D(shape, sigma=1): + m, n = [(ss - 1.) / 2. for ss in shape] + y, x = np.ogrid[-m:m + 1, -n:n + 1] + + h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) + h[h < np.finfo(h.dtype).eps * h.max()] = 0 + return h + + +def draw_umich_gaussian(heatmap, center, radius, k=1): + diameter = 2 * radius + 1 + gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) + + x, y = int(center[0]), int(center[1]) + + height, width = heatmap.shape[0:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right] + if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug + np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap) + return heatmap + + +def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False): + diameter = 2 * radius + 1 + gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) + value = np.array(value, dtype=np.float32).reshape(-1, 1, 1) + dim = value.shape[0] + reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value + if is_offset and dim == 2: + delta = np.arange(diameter * 2 + 1) - radius + reg[0] = reg[0] - delta.reshape(1, -1) + reg[1] = reg[1] - delta.reshape(-1, 1) + + x, y = int(center[0]), int(center[1]) + + height, width = heatmap.shape[0:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right] + masked_gaussian = gaussian[radius - top:radius + bottom, + radius - left:radius + right] + masked_reg = reg[:, radius - top:radius + bottom, + radius - left:radius + right] + if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug + idx = (masked_gaussian >= masked_heatmap).reshape( + 1, masked_gaussian.shape[0], masked_gaussian.shape[1]) + masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg + regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap + return regmap + + +def draw_msra_gaussian(heatmap, center, sigma): + tmp_size 
= sigma * 3 + mu_x = int(center[0] + 0.5) + mu_y = int(center[1] + 0.5) + w, h = heatmap.shape[0], heatmap.shape[1] + ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)] + br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)] + if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0: + return heatmap + size = 2 * tmp_size + 1 + x = np.arange(0, size, 1, np.float32) + y = x[:, np.newaxis] + x0 = y0 = size // 2 + g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) + g_x = max(0, -ul[0]), min(br[0], h) - ul[0] + g_y = max(0, -ul[1]), min(br[1], w) - ul[1] + img_x = max(0, ul[0]), min(br[0], h) + img_y = max(0, ul[1]), min(br[1], w) + heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum( + heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]], + g[g_y[0]:g_y[1], g_x[0]:g_x[1]]) + return heatmap + + +def grayscale(image): + return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + + +def lighting_(data_rng, image, alphastd, eigval, eigvec): + alpha = data_rng.normal(scale=alphastd, size=(3,)) + image += np.dot(eigvec, eigval * alpha) + + +def blend_(alpha, image1, image2): + image1 *= alpha + image2 *= (1 - alpha) + image1 += image2 + + +def saturation_(data_rng, image, gs, gs_mean, var): + alpha = 1. + data_rng.uniform(low=-var, high=var) + blend_(alpha, image, gs[:, :, None]) + + +def brightness_(data_rng, image, gs, gs_mean, var): + alpha = 1. + data_rng.uniform(low=-var, high=var) + image *= alpha + + +def contrast_(data_rng, image, gs, gs_mean, var): + alpha = 1. + data_rng.uniform(low=-var, high=var) + blend_(alpha, image, gs_mean) + + +def color_aug(data_rng, image, eig_val, eig_vec): + functions = [brightness_, contrast_, saturation_] + random.shuffle(functions) + + gs = grayscale(image) + gs_mean = gs.mean() + for f in functions: + f(data_rng, image, gs, gs_mean, 0.4) + lighting_(data_rng, image, 0.1, eig_val, eig_vec) diff --git a/pix2text/doc_xl_layout/utils/post_process.py b/pix2text/doc_xl_layout/utils/post_process.py new file mode 100644 index 0000000000000000000000000000000000000000..aa6025cfe13bdcbf94461aab923bb9ebc4e1963d --- /dev/null +++ b/pix2text/doc_xl_layout/utils/post_process.py @@ -0,0 +1,143 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from .ddd_utils import ddd2locrot +from .image import transform_preds + + +def get_pred_depth(depth): + return depth + + +def get_alpha(rot): + # output: (B, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, + # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos] + # return rot[:, 0] + idx = rot[:, 1] > rot[:, 5] + alpha1 = np.arctan(rot[:, 2] / rot[:, 3]) + (-0.5 * np.pi) + alpha2 = np.arctan(rot[:, 6] / rot[:, 7]) + (0.5 * np.pi) + return alpha1 * idx + alpha2 * (1 - idx) + + +def ddd_post_process_2d(dets, c, s, opt): + # dets: batch x max_dets x dim + # return 1-based class det list + ret = [] + include_wh = dets.shape[2] > 16 + for i in range(dets.shape[0]): + top_preds = {} + dets[i, :, :2] = transform_preds( + dets[i, :, 0:2], c[i], s[i], (opt.output_w, opt.output_h)) + classes = dets[i, :, -1] + for j in range(opt.num_classes): + inds = (classes == j) + top_preds[j + 1] = np.concatenate([ + dets[i, inds, :3].astype(np.float32), + get_alpha(dets[i, inds, 3:11])[:, np.newaxis].astype(np.float32), + get_pred_depth(dets[i, inds, 11:12]).astype(np.float32), + dets[i, inds, 12:15].astype(np.float32)], axis=1) + if include_wh: + top_preds[j + 1] = np.concatenate([ + top_preds[j + 1], + transform_preds( + dets[i, inds, 15:17], c[i], s[i], 
(opt.output_w, opt.output_h)) + .astype(np.float32)], axis=1) + ret.append(top_preds) + return ret + + +def ddd_post_process_3d(dets, calibs): + # dets: batch x max_dets x dim + # return 1-based class det list + ret = [] + for i in range(len(dets)): + preds = {} + for cls_ind in dets[i].keys(): + preds[cls_ind] = [] + for j in range(len(dets[i][cls_ind])): + center = dets[i][cls_ind][j][:2] + score = dets[i][cls_ind][j][2] + alpha = dets[i][cls_ind][j][3] + depth = dets[i][cls_ind][j][4] + dimensions = dets[i][cls_ind][j][5:8] + wh = dets[i][cls_ind][j][8:10] + locations, rotation_y = ddd2locrot( + center, alpha, dimensions, depth, calibs[0]) + bbox = [center[0] - wh[0] / 2, center[1] - wh[1] / 2, + center[0] + wh[0] / 2, center[1] + wh[1] / 2] + pred = [alpha] + bbox + dimensions.tolist() + \ + locations.tolist() + [rotation_y, score] + preds[cls_ind].append(pred) + preds[cls_ind] = np.array(preds[cls_ind], dtype=np.float32) + ret.append(preds) + return ret + + +def ddd_post_process(dets, c, s, calibs, opt): + # dets: batch x max_dets x dim + # return 1-based class det list + dets = ddd_post_process_2d(dets, c, s, opt) + dets = ddd_post_process_3d(dets, calibs) + return dets + + +def ctdet_4ps_post_process(dets, c, s, h, w, num_classes): + # dets: batch x max_dets x dim + # return 1-based class det dict + ret = [] + for i in range(dets.shape[0]): + top_preds = {} + dets[i, :, 0:2] = transform_preds(dets[i, :, 0:2], c[i], s[i], (w, h)) + dets[i, :, 2:4] = transform_preds(dets[i, :, 2:4], c[i], s[i], (w, h)) + dets[i, :, 4:6] = transform_preds(dets[i, :, 4:6], c[i], s[i], (w, h)) + dets[i, :, 6:8] = transform_preds(dets[i, :, 6:8], c[i], s[i], (w, h)) + classes = dets[i, :, 9] + for j in range(num_classes): + inds = (classes == j) + top_preds[j + 1] = np.concatenate([ + dets[i, inds, :8].astype(np.float32), + dets[i, inds, 8:].astype(np.float32)], axis=1).tolist() + ret.append(top_preds) + return ret + + +def ctdet_post_process(dets, c, s, h, w, num_classes): + # dets: batch x max_dets x dim + # return 1-based class det dict + ret = [] + for i in range(dets.shape[0]): + top_preds = {} + dets[i, :, :2] = transform_preds( + dets[i, :, 0:2], c[i], s[i], (w, h)) + dets[i, :, 2:4] = transform_preds( + dets[i, :, 2:4], c[i], s[i], (w, h)) + classes = dets[i, :, -1] + for j in range(num_classes): + inds = (classes == j) + top_preds[j + 1] = np.concatenate([ + dets[i, inds, :4].astype(np.float32), + dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() + ret.append(top_preds) + return ret + + +def ctdet_corner_post_process(corner, c, s, h, w, num_classes): + corner[:, :2] = transform_preds(corner[:, 0:2], c[0], s[0], (w, h)) + return corner + + +def multi_pose_post_process(dets, c, s, h, w): + # dets: batch x max_dets x 40 + # return list of 39 in image coord + ret = [] + for i in range(dets.shape[0]): + bbox = transform_preds(dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h)) + pts = transform_preds(dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h)) + top_preds = np.concatenate( + [bbox.reshape(-1, 4), dets[i, :, 4:5], + pts.reshape(-1, 34)], axis=1).astype(np.float32).tolist() + ret.append({np.ones(1, dtype=np.int32)[0]: top_preds}) + return ret diff --git a/pix2text/doc_xl_layout/utils/utils.py b/pix2text/doc_xl_layout/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3af045a9c6786cd092c0f911555b1254c8aa27a7 --- /dev/null +++ b/pix2text/doc_xl_layout/utils/utils.py @@ -0,0 +1,23 @@ +from __future__ import absolute_import +from __future__ import division 
+from __future__ import print_function + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + if self.count > 0: + self.avg = self.sum / self.count diff --git a/pix2text/doc_xl_layout/wrapper.py b/pix2text/doc_xl_layout/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..7ddc6248cb5d42af472b63267452dcd2147498d3 --- /dev/null +++ b/pix2text/doc_xl_layout/wrapper.py @@ -0,0 +1,285 @@ +import math +from shapely.geometry import Polygon +from functools import cmp_to_key + + +def calc_main_angle(pts_list): + if len(pts_list) == 0: + return 0 + good_angles, other_angles = [], [] + for pts in pts_list: + d_x_1, d_y_1 = pts[2] - pts[0], pts[3] - pts[1] + d_x_2, d_y_2 = pts[4] - pts[2], pts[5] - pts[3] + + width = math.sqrt(d_x_1 ** 2 + d_y_1 ** 2) + height = math.sqrt(d_x_2 ** 2 + d_y_2 ** 2) + angle = math.atan2(d_y_1, d_x_1) + + if width > height * 3: + good_angles.append(angle) + else: + other_angles.append(angle) + + if len(good_angles) > 0: + good_angles.sort() + return good_angles[len(good_angles) // 2] + else: + other_angles.sort() + return other_angles[len(other_angles) // 2] + + +def calc_x_type(a, b): + x_type = 0 + minx_a, maxx_a = a[0], a[0] + a[2] + minx_b, maxx_b = b[0], b[0] + b[2] + + start_left = 0 + if minx_a < minx_b: + start_left = 1 + elif minx_a > minx_b: + start_left = -1 + end_right = 0 + if maxx_a > maxx_b: + end_right = 1 + elif maxx_a < maxx_b: + end_right = -1 + + if maxx_a < minx_b + 1e-4 and maxx_a < maxx_b - 1e-4: + x_type = 1 # left + elif minx_a > maxx_b - 1e-4 and minx_a > minx_b + 1e-4: + x_type = 2 # right + elif start_left == 1 and end_right == -1: + x_type = 3 # near left + elif start_left == -1 and end_right == 1: + x_type = 4 # near right + elif start_left >= 0 and end_right >= 0: + x_type = 5 # contain + elif start_left <= 0 and end_right <= 0: + x_type = 6 # inside + else: + x_type = 0 + + return x_type + + +def calc_y_type(a, b): + y_type = 0 + miny_a, maxy_a = a[1], a[1] + a[3] + miny_b, maxy_b = b[1], b[1] + b[3] + + start_up = 0 + if miny_a < miny_b: + start_up = 1 + elif miny_a > miny_b: + start_up = -1 + end_down = 0 + if maxy_a > maxy_b: + end_down = 1 + elif maxy_a < maxy_b: + end_down = -1 + + if maxy_a < miny_b + 1e-4 and maxy_a < maxy_b - 1e-4: + y_type = 1 # up + elif miny_a > maxy_b - 1e-4 and miny_a > miny_b + 1e-4: + y_type = 2 # down + elif start_up == 1 and end_down == -1: + y_type = 3 # near up + elif start_up == -1 and end_down == 1: + y_type = 4 # near down + elif start_up >= 0 and end_down >= 0: + y_type = 5 # contain + elif start_up <= 0 and end_down <= 0: + y_type = 6 # inside + else: + y_type = 0 + + return y_type + + +def sort_pts(blocks): + main_angle = calc_main_angle([blk['pts'] for blk in blocks]) + main_sin, main_cos = math.sin(main_angle), math.cos(main_angle) + + def pts2rect(pts): + xs, ys = [], [] + for k in range(0, len(pts), 2): + x0 = pts[k] * main_cos + pts[k + 1] * main_sin + y0 = pts[k + 1] * main_cos - pts[k] * main_sin + xs.append(x0) + ys.append(y0) + minx, maxx, miny, maxy = min(xs), max(xs), min(ys), max(ys) + rect = [minx, miny, maxx - minx, maxy - miny] + # print('===', pts, '->', rect) + return rect + + def cmp_pts_udlr(a, b, thres=0.5): + rect_a, rect_b = pts2rect(a['pts']), pts2rect(b['pts']) + minx_a, miny_a, maxx_a, maxy_a = ( + 
rect_a[0], + rect_a[1], + rect_a[0] + rect_a[2], + rect_a[1] + rect_a[3], + ) + minx_b, miny_b, maxx_b, maxy_b = ( + rect_b[0], + rect_b[1], + rect_b[0] + rect_b[2], + rect_b[1] + rect_b[3], + ) + + x_type, y_type = calc_x_type(rect_a, rect_b), calc_y_type(rect_a, rect_b) + + y_near_rate = 0.0 + if y_type == 3: + y_near_rate = (maxy_a - miny_b) / min(maxy_a - miny_a, maxy_b - miny_b) + elif y_type == 4: + y_near_rate = (maxy_b - miny_a) / min(maxy_a - miny_a, maxy_b - miny_b) + + # print(rect_a, rect_b, x_type, y_type, y_near_rate) + # exit(0) + + if y_type == 1: + return -1 + elif y_type == 2: + return 1 + elif y_type == 3: + if x_type in [2, 4]: + if y_near_rate < thres: + return -1 + else: + return 1 + else: + return -1 + elif y_type == 4: + if x_type in [1, 3]: + if y_near_rate < thres: + return 1 + else: + return -1 + else: + return 1 + else: + if x_type == 1 or x_type == 3: + return -1 + elif x_type == 2 or x_type == 4: + return 1 + else: + center_y_diff = abs(0.5 * (miny_a + maxy_a) - 0.5 * (miny_b + maxy_b)) + max_h = max(maxy_a - miny_a, maxy_b - miny_b) + if center_y_diff / max_h < 0.1: + if (minx_a + maxx_a) < (minx_b + maxx_b): + return -1 + elif (minx_a + maxx_a) > (minx_b + maxx_b): + return 1 + else: + return 0 + else: + if (miny_a + maxy_a) < (miny_b + maxy_b): + return -1 + elif (miny_a + maxy_a) > (miny_b + maxy_b): + return 1 + else: + return 0 + + # print(blocks) + # print(cmp_pts_udlr(blocks[0], blocks[1])) + blocks.sort(key=cmp_to_key(cmp_pts_udlr)) + # print(blocks) + # exit(0) + + +def pts2poly(pts): + new_pts = [(pts[k], pts[k + 1]) for k in range(0, len(pts), 2)] + return Polygon(new_pts) + + +def pts_intersection_rate(src, tgt): + src_poly, tgt_poly = pts2poly(src), pts2poly(tgt) + src_area = src_poly.area + inter_area = src_poly.intersection(tgt_poly).area + return inter_area / src_area + + +def wrap_result(layout_detection_info, subfield_detection_info, category_map): + if layout_detection_info is None or subfield_detection_info is None: + return {} + # layout_detection_info = result["layout_dets"] + # subfield_detection_info = result["subfield_dets"] + + info = {'subfields': []} + for itm in subfield_detection_info: + subfield = { + 'category': category_map[itm['category_id']], + 'pts': itm['poly'], + 'confidence': itm['score'], + 'layouts': [], + } + info['subfields'].append(subfield) + sort_pts(info['subfields']) + + if len(info['subfields']) > 0: + other_subfield = { + 'category': '其他', + 'pts': [0, 0, 0, 0, 0, 0, 0, 0], + 'confidence': 0, + 'layouts': [], + } + for itm in layout_detection_info: + layout = { + 'category': category_map[itm['category_id']], + 'pts': itm['poly'], + 'confidence': itm['score'], + } + best_rate, best_idx = 0.0, -1 + for k in range(len(info['subfields'])): + inter_rate = pts_intersection_rate( + layout['pts'], info['subfields'][k]['pts'] + ) + if inter_rate > best_rate: + best_rate = inter_rate + best_idx = k + if best_idx >= 0 and best_rate > 0.1: + info['subfields'][best_idx]['layouts'].append(layout) + else: + other_subfield['layouts'].append(layout) + if len(other_subfield['layouts']) > 0: + info['subfields'].append(other_subfield) + else: + subfield = { + 'category': '其他', + 'pts': [0, 0, 0, 0, 0, 0, 0, 0], + 'confidence': 0, + 'layouts': [], + } + info['subfields'].append(subfield) + for itm in layout_detection_info: + layout = { + 'category': category_map[itm['category_id']], + 'pts': itm['poly'], + 'confidence': itm['score'], + } + info['subfields'][0]['layouts'].append(layout) + + for subfield in info['subfields']: + 
sort_pts(subfield['layouts']) + + new_subfields = [] + for subfield in info['subfields']: + if subfield['category'] != '其他': + new_subfields.append(subfield) + else: + for layout in subfield['layouts']: + layout_subfield = { + 'category': layout['category'], + 'pts': layout['pts'], + 'confidence': layout['confidence'], + 'layouts': [layout], + } + new_subfields.append(layout_subfield) + sort_pts(new_subfields) + info['layouts'] = [] + for subfield in new_subfields: + for layout in subfield['layouts']: + info['layouts'].append(layout) + + return info diff --git a/pix2text/doc_yolo_layout_parser.py b/pix2text/doc_yolo_layout_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7fcd725d381325d8fc72f911c645f9aef1334d --- /dev/null +++ b/pix2text/doc_yolo_layout_parser.py @@ -0,0 +1,419 @@ +# coding: utf-8 +# use DocLayout-YOLO model for layout analysis: https://github.com/opendatalab/DocLayout-YOLO +import json +import os +import logging +import shutil +from collections import defaultdict +from copy import deepcopy, copy +from pathlib import Path +from typing import Union, Optional + +from PIL import Image +import numpy as np +import torch +import torchvision + +from .consts import MODEL_VERSION +from .layout_parser import ElementType +from .utils import ( + list2box, + clipbox, + box2list, + read_img, + save_layout_img, + data_dir, + select_device, + y_overlap, + prepare_model_files2, +) +from . import DocXLayoutParser + +from doclayout_yolo import YOLOv10 + +logger = logging.getLogger(__name__) + +CURRENT_DIR = os.path.dirname(__file__) + + +class DocYoloLayoutParser(object): + ignored_types = {"abandon", "table_footnote"} + # names: {0: 'title', 1: 'plain text', 2: 'abandon', 3: 'figure', 4: 'figure_caption', 5: 'table', 6: 'table_caption', 7: 'table_footnote', 8: 'isolate_formula', 9: 'formula_caption'} + type_mappings = { + "title": ElementType.TITLE, + "figure": ElementType.FIGURE, + "plain text": ElementType.TEXT, + "table": ElementType.TABLE, + "table_caption": ElementType.TEXT, + "figure_caption": ElementType.TEXT, + "isolate_formula": ElementType.FORMULA, + "inline formula": ElementType.FORMULA, + "formula_caption": ElementType.PLAIN_TEXT, + "ocr text": ElementType.TEXT, + } + # types that are isolated and usually don't cross different columns. 
They should not be merged with other elements
+    is_isolated = {"table_caption", "figure_caption", "isolate_formula"}
+
+    def __init__(
+        self,
+        device: str = None,
+        model_fp: Optional[str] = None,
+        root: Union[str, Path] = data_dir(),
+        **kwargs,
+    ):
+        if model_fp is None:
+            model_fp = self._prepare_model_files(root)
+        device = select_device(device)
+        self.device = device
+        self.mapping = {
+            0: "title",
+            1: "plain text",
+            2: "abandon",
+            3: "figure",
+            4: "figure_caption",
+            5: "table",
+            6: "table_caption",
+            7: "table_footnote",
+            8: "isolate_formula",
+            9: "formula_caption",
+        }
+        logger.info("Use DocLayout-YOLO model for Layout Analysis: {}".format(model_fp))
+        self.predictor = YOLOv10(model_fp)
+
+    def _prepare_model_files(self, root):
+        model_root_dir = Path(root).expanduser() / MODEL_VERSION
+        model_dir = model_root_dir / "layout-docyolo"
+        model_fp = model_dir / "doclayout_yolo_docstructbench_imgsz1024.pt"
+        if model_fp.exists():
+            return model_fp
+        model_fp = prepare_model_files2(
+            model_fp_or_dir=model_fp,
+            remote_repo="breezedeus/pix2text-layout-docyolo",
+            file_or_dir="file",
+        )
+        return model_fp
+
+    @classmethod
+    def from_config(cls, configs: Optional[dict] = None, device: str = None, **kwargs):
+        configs = copy(configs or {})
+        device = select_device(device)
+        model_fp = configs.pop("model_fp", None)
+        root = configs.pop("root", data_dir())
+        configs.pop("device", None)
+
+        return cls(device=device, model_fp=model_fp, root=root, **configs)
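A minimal usage sketch of this parser (the file name is a placeholder; `parse` is defined just below):

from pix2text.doc_yolo_layout_parser import DocYoloLayoutParser

parser = DocYoloLayoutParser.from_config({})        # downloads the model on first use
layout_out, column_meta = parser.parse("page.png")  # "page.png" is a hypothetical path
for item in layout_out:
    print(item["type"], item["col_number"], item["score"])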
+    def parse(
+        self,
+        img: Union[str, Path, Image.Image],
+        table_as_image: bool = False,
+        *,
+        imgsz: int = 1024,  # Prediction image size
+        conf: float = 0.2,  # Confidence threshold
+        iou_threshold: float = 0.45,  # NMS IoU threshold
+        **kwargs,
+    ):
+        """
+        Analyze the layout of a page image.
+
+        Args:
+            img (): the input image; a file path or a `PIL.Image.Image`
+            table_as_image (): if True, report tables as figures instead of table elements
+            imgsz (int): Prediction image size
+            conf (float): Confidence threshold
+            iou_threshold (float): NMS IoU threshold
+            **kwargs ():
+                * save_debug_res (str): if `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save
+                * expansion_margin (int): expansion margin, in pixels, applied to every detected box
+
+        Returns: the list of detected layout elements, and the metadata of the detected columns
+
+        """
+        if isinstance(img, Image.Image):
+            img0 = img.convert("RGB")
+        else:
+            img0 = read_img(img, return_type="Image")
+        img_width, img_height = img0.size
+        det_res = self.predictor.predict(
+            img0,  # Image to predict
+            imgsz=imgsz,  # Prediction image size
+            conf=conf,  # Confidence threshold
+        )[0]
+        scores = det_res.__dict__["boxes"].conf
+        boxes = det_res.__dict__["boxes"].xyxy
+        _classes = det_res.__dict__["boxes"].cls
+
+        indices = torchvision.ops.nms(
+            boxes=torch.Tensor(boxes),
+            scores=torch.Tensor(scores),
+            iou_threshold=iou_threshold,
+        )
+        boxes, scores, _classes = boxes[indices], scores[indices], _classes[indices]
+        # dtype to int
+        _classes = _classes.int().tolist()
+
+        page_layout_result = []
+        for box, score, _cls in zip(boxes, scores, _classes):
+            page_layout_result.append(
+                {
+                    "type": self.mapping[_cls],
+                    "position": list2box(*box.tolist()),
+                    "score": float(score),
+                }
+            )
+
+        ignored_layout_result = [
+            item for item in page_layout_result if item["type"] in self.ignored_types
+        ]
+        for x in ignored_layout_result:
+            x["col_number"] = -1
+        ignored_layout_out, _ = self._format_outputs(
+            img_width, img_height, ignored_layout_result, table_as_image
+        )
+        if page_layout_result:
+            # MFR currently doesn't recognize tagged formulas well, so skip the merge for now
+            # page_layout_result = self._merge_isolated_formula_and_caption(page_layout_result)
+
+            # drop the ignored types
+            _page_layout_result = [
+                item
+                for item in page_layout_result
+                if item["type"] not in self.ignored_types
+            ]
+            layout_out = fetch_column_info(_page_layout_result, img_width, img_height)
+            layout_out, column_meta = self._format_outputs(
+                img_width, img_height, layout_out, table_as_image
+            )
+        else:
+            layout_out, column_meta = [], {}
+
+        debug_dir = None
+        if kwargs.get("save_debug_res", None):
+            debug_dir = Path(kwargs.get("save_debug_res"))
+            debug_dir.mkdir(exist_ok=True, parents=True)
+        if debug_dir is not None:
+            with open(debug_dir / "layout_out.json", "w", encoding="utf-8") as f:
+                json_out = deepcopy(layout_out)
+                for item in json_out:
+                    item["position"] = item["position"].tolist()
+                    item["type"] = item["type"].name
+                json.dump(
+                    json_out,
+                    f,
+                    indent=2,
+                    ensure_ascii=False,
+                )
+        # layout_out = DocXLayoutParser._merge_overlapped_boxes(layout_out)
+
+        expansion_margin = kwargs.get("expansion_margin", 8)
+        layout_out = DocXLayoutParser._expand_boxes(
+            layout_out, expansion_margin, height=img_height, width=img_width
+        )
+
+        save_layout_fp = kwargs.get(
+            "save_layout_res",
+            debug_dir / "layout_res.jpg" if debug_dir is not None else None,
+        )
+
+        layout_out.extend(ignored_layout_out)
+
+        if save_layout_fp:
+            element_type_list = [t for t in ElementType]
+            save_layout_img(
+                img0,
+                element_type_list,
+                layout_out,
+                save_path=save_layout_fp,
+                key="position",
+            )
+
+        return layout_out, column_meta
+
+    def _merge_isolated_formula_and_caption(self, page_layout_result):
+        # Merge isolated formulas with formula captions:
+        # for each caption, find the closest isolated formula on the same line to its left and merge the two boxes.
+        isolated_formula = [
+            item for item in page_layout_result if item["type"] == "isolate_formula"
+        ]
+        formula_caption = [
+            item for item in page_layout_result if item["type"] == "formula_caption"
+        ]
+        remaining_elements = [
+            item
+            for item in page_layout_result
+            if item["type"] not in ["isolate_formula", "formula_caption"]
+        ]
+        for caption in formula_caption:
+            caption_xmin, caption_ymin, caption_xmax, caption_ymax = box2list(
+                caption["position"]
+            )
+            min_dist = float("inf")
+            nearest_formula = None
+            for formula in isolated_formula:
+                formula_xmin, formula_ymin, formula_xmax, formula_ymax = box2list(
+                    formula["position"]
+                )
+                if y_overlap(caption, formula, key="position") >= 0.7:
+                    dist = caption_xmin - formula_xmax
+                    if 0 <= dist < min_dist:
+                        min_dist = dist
+                        nearest_formula = formula
+            if nearest_formula is not None:
+                new_formula = deepcopy(nearest_formula)
+                formula_xmin, formula_ymin, formula_xmax, formula_ymax = box2list(
+                    new_formula["position"]
+                )
+                new_formula["position"] = list2box(
+                    min(caption_xmin, formula_xmin),
+                    min(caption_ymin, formula_ymin),
+                    max(caption_xmax, formula_xmax),
+                    max(caption_ymax, formula_ymax),
+                )
+                remaining_elements.append(new_formula)
+                isolated_formula.remove(nearest_formula)
+            else:  # no matching formula found
+                remaining_elements.append(caption)
+        return remaining_elements + isolated_formula
+
+    def _format_outputs(self, width, height, layout_out, table_as_image: bool):
+        # collect the bounding box and mean score of each column
+        column_numbers = set([item["col_number"] for item in layout_out])
+        column_meta = defaultdict(dict)
+        for col_idx in column_numbers:
+            cur_col_res = [item for item in layout_out if item["col_number"] == col_idx]
+            mean_score = np.mean([item["score"] for item in cur_col_res])
+            xmin, ymin, xmax, ymax = box2list(cur_col_res[0]["position"])
+            for item in cur_col_res[1:]:
+                cur_xmin, cur_ymin, cur_xmax, cur_ymax = box2list(item["position"])
+                xmin = min(xmin, cur_xmin)
+                ymin = min(ymin, cur_ymin)
+                xmax = max(xmax, cur_xmax)
+                ymax = max(ymax, cur_ymax)
+            column_meta[col_idx]["position"] = clipbox(
+                list2box(xmin, ymin, xmax, ymax), height, width
+            )
+            column_meta[col_idx]["score"] = mean_score
+
+        final_out = []
+        for box_info in layout_out:
+            image_type = box_info["type"]
+            isolated = image_type in self.is_isolated
+            if image_type in self.ignored_types:
+                image_type = ElementType.IGNORED
+            else:
+                image_type = self.type_mappings.get(image_type, ElementType.UNKNOWN)
+            if table_as_image and image_type == ElementType.TABLE:
+                image_type = ElementType.FIGURE
+            final_out.append(
+                {
+                    "type": image_type,
+                    "position": clipbox(box_info["position"], height, width),
+                    "score": box_info["score"],
+                    "col_number": box_info["col_number"],
+                    "isolated": isolated,
+                }
+            )
+
+        return final_out, column_meta
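To make the column heuristics below concrete, consider a toy page of 1000 x 1400 pixels (hypothetical boxes; `position` holds the four corners clockwise from the top left). The two 460-px text blocks dominate by area, so the estimated column width is 460, and the 720-px title exceeds `col_width * 1.5 = 690`, so it is tagged as spanning the full page:

boxes = [
    {"position": [[40, 700], [500, 700], [500, 1300], [40, 1300]]},    # body text, width 460
    {"position": [[520, 700], [980, 700], [980, 1300], [520, 1300]]},  # body text, width 460
    {"position": [[140, 60], [860, 60], [860, 140], [140, 140]]},      # title, width 720
]
col_width = cal_column_width(boxes, 1000, 1400)  # -> 460.0
locate_full_column(boxes, col_width, 1000)
# the title gets category "full column" (col_number 0); the text blocks get "sub column"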
+def cal_column_width(layout_res, img_width, img_height):
+    widths = [item["position"][1][0] - item["position"][0][0] for item in layout_res]
+    if len(widths) <= 2:
+        return min(widths + [img_width])
+
+    # compute the width and area of every box
+    boxes_info = []
+    for item in layout_res:
+        x0, y0 = item["position"][0]
+        x1, y1 = item["position"][2]
+        width = x1 - x0
+        height = y1 - y0
+        area = width * height
+        boxes_info.append({"width": width, "area": area, "y0": y0, "height": height})
+
+    # sort by area so the largest boxes come first
+    boxes_info.sort(key=lambda x: x["area"], reverse=True)
+
+    # estimate the column width as an area-weighted average
+    total_weight = 0
+    weighted_width_sum = 0
+
+    # only consider the top 30% of boxes by area
+    top_boxes = boxes_info[: max(2, int(len(boxes_info) * 0.3))]
+
+    for box in top_boxes:
+        # use the box area as its weight
+        weight = box["area"]
+        # boxes in the lower half of the page get a higher weight (usually body text)
+        if box["y0"] > img_height * 0.5:
+            weight *= 1.5
+        weighted_width_sum += box["width"] * weight
+        total_weight += weight
+
+    estimated_width = (
+        weighted_width_sum / total_weight if total_weight > 0 else img_width
+    )
+
+    # clamp the estimate to a sensible range
+    min_width = img_width * 0.3  # a column should not be too narrow
+    max_width = img_width * 0.95  # leave some page margin
+
+    return min(max(estimated_width, min_width), max_width)
+
+
+def locate_full_column(layout_res, col_width, img_width):
+    # find the elements that span multiple columns
+    for item in layout_res:
+        cur_width = item["position"][1][0] - item["position"][0][0]
+        if cur_width > col_width * 1.5 or cur_width > img_width * 0.7:
+            item["category"] = "full column"
+            item["col_number"] = 0
+        else:
+            item["category"] = "sub column"
+            item["col_number"] = -1
+    return layout_res
+
+
+def fetch_column_info(layout_res, img_width, img_height):
+    # sort all elements by their left x coordinate
+    layout_res.sort(key=lambda x: x["position"][0][0])
+
+    col_width = cal_column_width(layout_res, img_width, img_height)
+    layout_res = locate_full_column(layout_res, col_width, img_width)
+    col_width = max(
+        [
+            item["position"][1][0] - item["position"][0][0]
+            for item in layout_res
+            if item["category"] == "sub column"
+        ],
+        default=col_width,
+    )
+
+    # assign each element to a column
+    col_left = img_width
+    cur_col = 1
+    for idx, info in enumerate(layout_res):
+        if info["category"] == "full column":
+            continue
+        xmin, xmax = info["position"][0][0], info["position"][1][0]
+        if col_left == img_width:
+            col_left = xmin
+        if xmin < col_left + col_width * 0.99 and xmax <= xmin + col_width * 1.02:
+            info["col_number"] = cur_col
+            col_left = min(col_left, xmin)
+        else:
+            cur_col += 1
+            col_left = xmin
+            info["col_number"] = cur_col
+    logger.debug(f"Column number: {cur_col}, with column width: {col_width}")
+
+    if cur_col == 1:
+        # only one column; assign everything to it
+        for item in layout_res:
+            item["col_number"] = 1
+
+    layout_res.sort(
+        key=lambda x: (x["col_number"], x["position"][0][1], x["position"][0][0])
+    )
+    return layout_res
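Continuing the toy page above, `fetch_column_info` then assigns the two text blocks to columns 1 and 2 and returns everything sorted into reading order (column number, then top-to-bottom, then left-to-right):

ordered = fetch_column_info(boxes, 1000, 1400)
# -> title (col_number 0), left text block (col_number 1), right text block (col_number 2)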
diff --git a/pix2text/formula_detector.py b/pix2text/formula_detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..13140fa37d0aed1962d9ece3ed4d85abbd828123
--- /dev/null
+++ b/pix2text/formula_detector.py
@@ -0,0 +1,82 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
+from typing import Optional, Union, Tuple
+from pathlib import Path
+import logging
+
+from cnstd.yolo_detector import YoloDetector
+
+from .consts import AVAILABLE_MODELS
+from .utils import data_dir, prepare_model_files
+
+logger = logging.getLogger(__name__)
+
+
+BACKEND_TO_EXTENSION_MAPPING = {
+    'pytorch': 'pt',
+    'onnx': 'onnx',
+    'coreml': 'mlpackage',
+    'torchscript': 'torchscript',
+}
+
+
+class MathFormulaDetector(YoloDetector):
+    def __init__(
+        self,
+        *,
+        model_name: str = 'mfd-1.5',
+        model_backend: str = 'onnx',
+        device: Optional[str] = None,
+        model_path: Optional[Union[str, Path]] = None,
+        root: Union[str, Path] = data_dir(),
+        static_resized_shape: Optional[Union[int, Tuple[int, int]]] = None,
+        **kwargs,
+    ):
+        """
+        Math Formula Detector based on YOLO.
+
+        Args:
+            model_name (str): model name, default is 'mfd-1.5'.
+            model_backend (str): model backend, default is 'onnx'.
+            device (optional str): device to use, default is None.
+            model_path (optional str): model path, default is None.
+            root (optional str): root directory to save model files, default is data_dir().
+            static_resized_shape (optional int or tuple): static resized shape, default is None.
+                When it is not None, the input image will be resized to this shape before detection,
+                ignoring the input parameter `resized_shape` if .detect() is called.
+                Some model formats, such as CoreML, may require a fixed input size.
+            **kwargs (): other parameters.
+        """
+        if model_path is None:
+            model_info = AVAILABLE_MODELS.get_info(model_name, model_backend)
+            model_path = prepare_model_files(root, model_info)
+            extension = BACKEND_TO_EXTENSION_MAPPING.get(model_backend, model_backend)
+            cand_paths = find_files(model_path, f'.{extension}')
+            if not cand_paths:
+                raise FileNotFoundError(f'cannot find an available model file in {model_path}')
+            model_path = cand_paths[0]
+        logger.info(f'Use model path for MFD: {model_path}')
+
+        super().__init__(
+            model_path=model_path,
+            device=device,
+            static_resized_shape=static_resized_shape,
+            **kwargs,
+        )
+
+
+def find_files(directory, extension):
+    # build a Path object for the search root
+    dir_path = Path(directory)
+
+    pattern = f"*mfd*{extension}"
+
+    outs = []
+    # recursively collect files that match the pattern
+    for file_path in dir_path.rglob(pattern):
+        # skip hidden files: the name must not start with a dot (unless the dot only marks the extension)
+        if not file_path.name.startswith('.') or file_path.suffix == extension:
+            outs.append(file_path)
+
+    return outs
diff --git a/pix2text/latex_ocr.py b/pix2text/latex_ocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f77f1a7a0703f4ce6abd1515310d0e83f5e7e15
--- /dev/null
+++ b/pix2text/latex_ocr.py
@@ -0,0 +1,447 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
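Hypothetical usage of the detector above (the `.detect()` method is inherited from cnstd's `YoloDetector` base class, as its docstring notes; the image path is a placeholder):

from pix2text.formula_detector import MathFormulaDetector

detector = MathFormulaDetector(model_name='mfd-1.5', model_backend='onnx')
formulas = detector.detect('page_with_formulas.jpg')
for item in formulas:
    print(item)  # one entry per detected formula region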
+import os +import shutil +import string +from typing import Optional, Union, List, Dict, Any +import logging +from pathlib import Path +import re + +import torch +import tqdm +from optimum.onnxruntime import ORTModelForVision2Seq +from transformers import ( + TrOCRProcessor, + VisionEncoderDecoderModel, +) + +from PIL import Image +from cnocr.utils import get_default_ort_providers +from transformers.generation import ( + GenerateEncoderDecoderOutput, + GenerateBeamEncoderDecoderOutput, +) + +from .consts import MODEL_VERSION, AVAILABLE_MODELS +from .utils import data_dir, select_device, prepare_imgs, prepare_model_files2 + +logger = logging.getLogger(__name__) + + +class LatexOCR(object): + """Get a prediction of a math formula image in the easiest way""" + + def __init__( + self, + *, + model_name: str = 'mfr-1.5', + model_backend: str = 'onnx', + device: str = None, + context: str = None, # deprecated, use `device` instead + model_dir: Optional[Union[str, Path]] = None, + root: Union[str, Path] = data_dir(), + more_processor_configs: Optional[Dict[str, Any]] = None, + more_model_configs: Optional[Dict[str, Any]] = None, + **kwargs, + ): + """Initialize a LatexOCR model. + + Args: + model_name (str, optional): The name of the model. Defaults to 'mfr-1.5'. + model_backend (str, optional): The model backend, either 'onnx' or 'pytorch'. Defaults to 'onnx'. + device (str, optional): What device to use for computation, supports `['cpu', 'cuda', 'gpu']`; defaults to None, which selects the device automatically. + context (str, optional): Deprecated, use `device` instead. What device to use for computation, supports `['cpu', 'cuda', 'gpu']`; defaults to None, which selects the device automatically. + model_dir (Optional[Union[str, Path]], optional): The model file directory. Defaults to None. + root (Union[str, Path], optional): The model root directory. Defaults to data_dir(). + more_processor_configs (Optional[Dict[str, Any]], optional): Additional processor configurations. Defaults to None. + more_model_configs (Optional[Dict[str, Any]], optional): Additional model configurations. Defaults to None. + + - provider (`str`, defaults to `None`, which means to select one provider automatically): + ONNX Runtime provider to use for loading the model. See https://onnxruntime.ai/docs/execution-providers/ for + possible providers. + - session_options (`Optional[onnxruntime.SessionOptions]`, defaults to `None`),: + ONNX Runtime session options to use for loading the model. + - provider_options (`Optional[Dict[str, Any]]`, defaults to `None`): + Provider option dictionaries corresponding to the provider used. See available options + for each provider: https://onnxruntime.ai/docs/api/c/group___global.html . + - ...: see more information here: optimum.onnxruntime.modeling_ort.ORTModel.from_pretrained() + **kwargs: Additional arguments, currently not used. 
+ """ + + if context is not None: + logger.warning(f'`context` is deprecated, please use `device` instead') + if device is None and context is not None: + device = context + self.device = select_device(device) + + model_info = AVAILABLE_MODELS.get_info(model_name, model_backend) + + if model_dir is None: + model_dir = self._prepare_model_files(root, model_backend, model_info) + logger.info(f'Use model dir for LatexOCR: {model_dir}') + + more_model_configs = more_model_configs or {} + if model_backend == 'onnx' and 'provider' not in more_model_configs: + available_providers = get_default_ort_providers() + if not available_providers: + raise RuntimeError( + 'No available providers for ONNX Runtime, please install onnxruntime-gpu or onnxruntime.' + ) + more_model_configs['provider'] = available_providers[0] + self.model, self.processor = self._init_model( + model_backend, + model_dir, + more_processor_config=more_processor_configs, + more_model_config=more_model_configs, + ) + logger.info( + f'Loaded Pix2Text MFR model {model_name} to: backend-{model_backend}, device-{self.device}' + ) + + def _prepare_model_files(self, root, model_backend, model_info): + model_root_dir = Path(root) / MODEL_VERSION + model_dir = model_root_dir / model_info['local_model_id'] + if model_dir.is_dir() and list(model_dir.glob('**/[!.]*')): + return str(model_dir) + assert 'hf_model_id' in model_info + model_dir = prepare_model_files2( + model_fp_or_dir=model_dir, + remote_repo=model_info["hf_model_id"], + file_or_dir="dir", + ) + return model_dir + + def _init_model( + self, + model_backend, + model_dir, + more_processor_config=None, + more_model_config=None, + ): + more_processor_config = more_processor_config or {} + more_model_config = more_model_config or {} + processor = TrOCRProcessor.from_pretrained(model_dir, **more_processor_config) + if model_backend == 'pytorch': + model = VisionEncoderDecoderModel.from_pretrained( + model_dir, **more_model_config + ) + model.to(self.device) + model.eval() + else: + if 'use_cache' not in more_model_config: + more_model_config['use_cache'] = False + if ( + 'provider' in more_model_config + and more_model_config['provider'] == 'CUDAExecutionProvider' + ): + more_model_config["use_io_binding"] = more_model_config['use_cache'] + model = ORTModelForVision2Seq.from_pretrained( + model_dir, **more_model_config + ) + model.to(self.device) + return model, processor + + def __call__(self, *args, **kwargs) -> Union[str, List[str]]: + return self.recognize(*args, **kwargs) + + def recognize( + self, + imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]], + batch_size: int = 1, + use_post_process: bool = True, + rec_config: Optional[dict] = None, + **kwargs, + ) -> Union[Dict[str, Any], List[Dict[str, Any]]]: + """ + Recognize Math Formula images to LaTeX Expressions + Args: + imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]): The image or list of images + batch_size (int): The batch size + use_post_process (bool): Whether to use post process. Defaults to True + rec_config (Optional[dict]): The generation config + **kwargs (): Other arguments. Not used for now + + Returns: The LaTeX Result or list of LaTeX Results; each result is a dict with `text` and `score` fields. 
+ + """ + is_single_image = False + if isinstance(imgs, (str, Path, Image.Image)): + imgs = [imgs] + is_single_image = True + + input_imgs = prepare_imgs(imgs) + + # inference batch by batch + results = [] + for i in tqdm.tqdm(range(0, len(input_imgs), batch_size)): + part_imgs = input_imgs[i : i + batch_size] + results.extend(self._one_batch(part_imgs, rec_config, **kwargs)) + + if use_post_process: + for info in results: + info['text'] = self.post_process(info['text']) + + if is_single_image: + return results[0] + return results + + def _one_batch(self, img_list, rec_config, **kwargs): + rec_config = rec_config or {} + pixel_values = self.processor(images=img_list, return_tensors="pt").pixel_values + outs = self.model.generate( + pixel_values.to(self.device), + return_dict_in_generate=True, + output_scores=True, + **rec_config, + ) + mean_probs = self._cal_scores(outs) + + generated_text = self.processor.batch_decode( + outs.sequences, skip_special_tokens=True + ) + assert len(img_list) == len(generated_text) == len(mean_probs) + + final_out = [] + for text, prob in zip(generated_text, mean_probs): + final_out.append({'text': text, 'score': prob}) + return final_out + + def _cal_scores(self, outs): + if isinstance(outs, GenerateBeamEncoderDecoderOutput): + mean_probs = outs.sequences_scores.exp().tolist() + elif isinstance(outs, GenerateEncoderDecoderOutput): + logits = torch.stack(outs.scores, dim=1) + scores = torch.softmax(logits, dim=-1).max(dim=2).values + + mean_probs = [] + for idx, example in enumerate(scores): + cur_length = int( + (outs.sequences[idx] != self.processor.tokenizer.pad_token_id).sum() + ) + assert cur_length > 1 + # 获得几何平均值。注意:example中的第一个元素对应sequence中的第二个元素 + mean_probs.append( + float((example[: cur_length - 1] + 1e-8).log().mean().exp()) + ) + else: + raise Exception(f'unprocessed output type: {type(outs)}') + + return mean_probs + + def post_process(self, text): + text = remove_redundant_script(text) + text = remove_trailing_whitespace(text) + text = replace_illegal_symbols(text) + for _ in range(10): + new_text = remove_empty_text(text) + if new_text == text: + break + text = new_text + + text = fix_latex(text) + text = remove_unnecessary_spaces(text) + return text.strip() + + +def remove_redundant_script(text): + # change '^ { abc }' to 'abc' + pattern = r'^\^\s*{\s*(.*?)\s*}' + result = re.sub(pattern, r'\1', text) + # change '_ { abc }' to 'abc' + pattern = r'^_\s*{\s*(.*?)\s*}' + result = re.sub(pattern, r'\1', result) + return result.strip() + + +def replace_illegal_symbols(text): + illegal_to_legals = [ + (r'\\\.', r'\\ .'), # \. -> \ . 
+        (r'\\=', r'\\ ='),  # \= -> \ =
+        (r'\\-', r'\\ -'),  # \- -> \ -
+        (r'\\~', r'\\ ~'),  # \~ -> \ ~
+    ]
+    for illegal, legal in illegal_to_legals:
+        text = re.sub(illegal, legal, text)
+    return text
+
+
+def remove_empty_text(latex_expression):
+    # remove scripts and wrappers with empty bodies, e.g. change 'abc ^{}' to 'abc'
+    patterns = [
+        r'\\hat\s*{\s*}',  # matches \hat{}
+        r'\^\s*{\s*}',  # matches ^{}
+        r'_\s*{\s*}',  # matches _{}
+        r'\\text\s*{\s*}',  # matches \text{}
+        r'\\tilde\s*{\s*}',  # matches \tilde{}
+        r'\\bar\s*{\s*}',  # matches \bar{}
+        r'\\vec\s*{\s*}',  # matches \vec{}
+        r'\\acute\s*{\s*}',  # matches \acute{}
+        r'\\grave\s*{\s*}',  # matches \grave{}
+        r'\\breve\s*{\s*}',  # matches \breve{}
+        r'\\overline\s*{\s*}',  # matches \overline{}
+        r'\\dot\s*{\s*}',  # matches \dot{}
+        r'\\ddot\s*{\s*}',  # matches \ddot{}
+        r'\\widehat\s*{\s*}',  # matches \widehat{}
+        r'\\widetilde\s*{\s*}',  # matches \widetilde{}
+    ]
+
+    # replace with re.sub
+    for pattern in patterns:
+        latex_expression = re.sub(pattern, '', latex_expression)
+    return latex_expression.strip()
+
+
+latex_whitespace_symbols = [
+    r'\\ +',  # escaped space
+    r'\\quad\s*',  # 1em-wide space
+    r'\\qquad\s*',  # 2em-wide space
+    r'\\,\s*',  # thin space
+    r'\\:\s*',  # medium space
+    r'\\;\s*',  # thick space
+    r'\\enspace\s*',  # 0.5em-wide space
+    r'\\thinspace\s*',  # 1/6em-wide space
+    r'\\!\s*',  # negative thin space
+]
+
+
+def remove_trailing_whitespace(latex_str):
+    # regex pattern matching whitespace symbols at the end of the string
+    trailing_whitespace_pattern = r'(?:' + '|'.join(latex_whitespace_symbols) + r')+$'
+
+    # replace with re.sub
+    return re.sub(trailing_whitespace_pattern, '', latex_str).strip()
+
+
+def remove_unnecessary_spaces(latex_str):
+    # Preserve the space between a command and a following letter
+    latex_str = re.sub(r'\\([a-zA-Z]+) (?=[a-zA-Z])', r'\\\1 ', latex_str)
+
+    # Remove spaces after commands not followed by a letter, carefully not affecting commands that require space
+    latex_str = re.sub(r'\\([a-zA-Z]+)\s+(?![a-zA-Z])', r'\\\1', latex_str)
+
+    # Remove spaces around curly braces, preserving internal spaces
+    latex_str = re.sub(r'(\{)\s+', r'\1', latex_str)
+    latex_str = re.sub(r'\s+(\})', r'\1', latex_str)
+
+    # Specifically target and remove spaces around mathematical operators, including +, -, =, and similar operators
+    latex_str = re.sub(r'(?<=[^\\])\s*([+\-=])\s*', r'\1', latex_str)
+
+    # Remove spaces around "^" and "_" for subscripts and superscripts
+    latex_str = re.sub(r'\s*(\^|\_)\s*', r'\1', latex_str)
+
+    return latex_str
+
+
+def find_all_left_or_right(latex, left_or_right='left'):
+    left_bracket_infos = []
+    prefix_len = len(left_or_right) + 1
+    # find every '\left' (or '\right') in the latex together with the first
+    # non-space character that follows it, and record where it occurs
+    for m in re.finditer(rf'\\{left_or_right}\s*\S', latex):
+        start, end = m.span()
+        if latex[end - 1] in string.ascii_letters:
+            continue
+        # if the last matched character is '\' or a space, keep scanning forward
+        # until a non-letter character is reached, e.g. "\left \big("
+        while latex[end - 1] in ('\\', ' '):
+            end += 1
+        while end < len(latex) and latex[end].isalpha():
+            end += 1
+        ori_str = latex[start + prefix_len : end].strip()
+        # FIXME: ori_str may contain multiple '\left's; they should be split apart
+
+        left_bracket_infos.append({'str': ori_str, 'start': start, 'end': end})
+    left_bracket_infos.sort(key=lambda x: x['start'])
+    return left_bracket_infos
+
+
+def match_left_right(left_str, right_str):
+    r"""Match a left bracket with a right bracket, e.g. the `(` from `\left(` with the `)` from `\right)`.
+
+    Note: `find_all_left_or_right` already strips the '\left'/'\right' prefix from the
+    stored strings, so no further prefix stripping is needed here.
+    """
+    left_str = left_str.strip().replace(' ', '')
+    right_str = right_str.strip().replace(' ', '')
+    # strip the identical leading part, e.g. the '\big' shared by '\big(' and '\big)'
+    while left_str and right_str and left_str[0] == right_str[0]:
+        left_str = left_str[1:]
+        right_str = right_str[1:]
+
+    match_pairs = [
+        ('', ''),
+        ('(', ')'),
+        ('\{', '.'),  # e.g. \left\{ paired with an invisible \right.
+        ('⟮', '⟯'),
+        ('[', ']'),
+        ('⟨', '⟩'),
+        ('{', '}'),
+        ('⌈',
+        '⌉'),
+        ('┌', '┐'),
+        ('⌊', '⌋'),
+        ('└', '┘'),
+        ('⎰', '⎱'),
+        ('lt', 'gt'),
+        ('lang', 'rang'),
+        (r'langle', r'rangle'),
+        (r'lbrace', r'rbrace'),
+        ('lBrace', 'rBrace'),
+        (r'lbracket', r'rbracket'),
+        (r'lceil', r'rceil'),
+        ('lcorner', 'rcorner'),
+        (r'lfloor', r'rfloor'),
+        (r'lgroup', r'rgroup'),
+        (r'lmoustache', r'rmoustache'),
+        (r'lparen', r'rparen'),
+        (r'lvert', r'rvert'),
+        (r'lVert', r'rVert'),
+    ]
+    return (left_str, right_str) in match_pairs
+
+
+def fix_latex(latex: str) -> str:
+    """Further process and fix the recognition result."""
+    # # replace all Chinese parentheses in the latex with English ones
+    # latex = latex.replace('(', '(').replace(')', ')')
+    # # replace all Chinese commas in the latex with English ones
+    # latex = latex.replace(',', ',')
+
+    left_bracket_infos = find_all_left_or_right(latex, left_or_right='left')
+    right_bracket_infos = find_all_left_or_right(latex, left_or_right='right')
+    # pair each '\left' with the first unmatched '\right' that appears after it
+    for left_bracket_info in left_bracket_infos:
+        for right_bracket_info in right_bracket_infos:
+            if (
+                not right_bracket_info.get('matched', False)
+                and right_bracket_info['start'] > left_bracket_info['start']
+                and match_left_right(
+                    left_bracket_info['str'], right_bracket_info['str']
+                )
+            ):
+                left_bracket_info['matched'] = True
+                right_bracket_info['matched'] = True
+                break
+
+    for left_bracket_info in left_bracket_infos:
+        # replace each unmatched '\left' with spaces of the same length
+        left_len = len('left') + 1
+        if not left_bracket_info.get('matched', False):
+            start_idx = left_bracket_info['start']
+            end_idx = start_idx + left_len
+            latex = (
+                latex[: left_bracket_info['start']]
+                + ' ' * (end_idx - start_idx)
+                + latex[end_idx:]
+            )
+    for right_bracket_info in right_bracket_infos:
+        # replace each unmatched '\right' with spaces of the same length
+        right_len = len('right') + 1
+        if not right_bracket_info.get('matched', False):
+            start_idx = right_bracket_info['start']
+            end_idx = start_idx + right_len
+            latex = (
+                latex[: right_bracket_info['start']]
+                + ' ' * (end_idx - start_idx)
+                + latex[end_idx:]
+            )
+
+    # collapse consecutive whitespace in the latex into a single space
+    latex = re.sub(r'\s+', ' ', latex)
+    return latex.strip()
diff --git a/pix2text/layout_parser.py b/pix2text/layout_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b48f0b88da8d4b024e69e8dd9fbbe78a0ef0d76
--- /dev/null
+++ b/pix2text/layout_parser.py
@@ -0,0 +1,130 @@
+# coding: utf-8
+from enum import Enum
+from pathlib import Path
+from typing import Union, Optional, List, Dict, Any, Tuple
+
+from PIL import Image
+from cnstd import LayoutAnalyzer
+from cnstd.yolov7.consts import CATEGORY_DICT
+
+from .utils import read_img, save_layout_img, select_device
+
+
+class ElementType(Enum):
+    ABANDONED = -2  # regions that should not be recognized, e.g. the area between an Image and its caption
+    IGNORED = -1
+    UNKNOWN = 0
+    TEXT = 1
+    TITLE = 2
+    FIGURE = 3
+    TABLE = 4
+    FORMULA = 5
+    PLAIN_TEXT = 11  # similar to TEXT, but guaranteed to contain no formulas
+
+    def __repr__(self) -> str:
+        return self.name
+
+    def __str__(self) -> str:
+        return self.name
+
+
+class LayoutParser(object):
+    def __init__(
+        self,
+        model_type: str = 'yolov7_tiny',  # currently only `yolov7_tiny` is supported
+        model_backend: str = 'pytorch',  # currently only `pytorch` is supported
+        device: Optional[str] = None,
+        **kwargs,
+    ):
+        device = select_device(device)
+        device = device if device != 'mps' else 'cpu'
+        self.layout_model = LayoutAnalyzer(
+            model_name='layout',
+            model_type=model_type,
+            model_backend=model_backend,
+            device=device,
+            **kwargs,
+        )
+        self.ignored_types = {'_background_', 'Footer', 'Reference'}
+        self.type_mappings = {
+            'Header': ElementType.TEXT,
+            'Text': ElementType.TEXT,
+            'Title': ElementType.TITLE,
+            'Figure': ElementType.FIGURE,
+            'Figure caption': ElementType.TEXT,
+            'Table': ElementType.TABLE,
+            'Table caption': ElementType.TEXT,
+            'Reference': ElementType.TEXT,
+            'Equation': ElementType.FORMULA,
+        }
+
+    @classmethod
+    def from_config(cls, configs: Optional[dict] = None, device: Optional[str] = None, **kwargs):
+        configs = configs or {}
+        device = select_device(device)
+        configs['device'] = device if device != 'mps' else 'cpu'
+
+        return cls(
+            model_type=configs.get('model_type', 'yolov7_tiny'),
+            model_backend=configs.get('model_backend', 'pytorch'),
+            device=device,
+            **kwargs,
+        )
+
+    def __call__(self, *args, **kwargs):
+        return self.parse(*args, **kwargs)
+
+    def parse(
+        self,
+        img: Union[str, Path, Image.Image],
+        resized_shape: int = 608,
+        table_as_image: bool = False,
+        **kwargs,
+    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
+        """
+        Parse the layout of the given image.
+
+        Args:
+            img (Union[str, Path, Image.Image]): the image to parse
+            resized_shape (int): resize the image to this size before layout detection
+            table_as_image (bool): if `True`, detected tables are returned as figures
+            **kwargs (): other arguments; if `save_layout_res` is set, the layout result image is saved to that path
+
+        Returns: parsed results & column meta information;
+            the parsed results is a list of dicts with keys: 'type', 'position', 'score':
+
+                * type: ElementType
+                * position: np.ndarray, with shape of (4, 2)
+                * score: float
+            the column meta is a dict, with column numbers as its keys.
+
+        """
+        if isinstance(img, Image.Image):
+            img0 = img.convert('RGB')
+        else:
+            img0 = read_img(img, return_type='Image')
+        layout_out = self.layout_model(img0.copy(), resized_shape=resized_shape)
+
+        if kwargs.get('save_layout_res'):
+            save_layout_img(
+                img0,
+                CATEGORY_DICT['layout'],
+                layout_out,
+                kwargs.get('save_layout_res'),
+                key='box',
+            )
+
+        final_out = []
+        for box_info in layout_out:
+            image_type = box_info['type']
+            if image_type in self.ignored_types:
+                continue
+            image_type = self.type_mappings.get(image_type, image_type)
+            if table_as_image and image_type == ElementType.TABLE:
+                image_type = ElementType.FIGURE
+            final_out.append(
+                {
+                    'type': image_type,
+                    'position': box_info['box'],
+                    'score': box_info['score'],
+                }
+            )
+
+        return final_out, {}
diff --git a/pix2text/ocr_engine.py b/pix2text/ocr_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbdb27e0b3c30ad8a0822b1b9a532d5f3d2dd46b
--- /dev/null
+++ b/pix2text/ocr_engine.py
@@ -0,0 +1,204 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
+import string
+from typing import Sequence, List, Optional
+
+import numpy as np
+import cv2
+
+from .utils import custom_deepcopy
+
+
+def clip(x, min_value, max_value):
+    return min(max(x, min_value), max_value)
+
+
+class TextOcrEngine:
+    """Text OCR Engine Wrapper"""
+
+    name = 'unknown'
+
+    def __init__(self, languages: Sequence[str], ocr_engine):
+        self.languages = languages
+        self.ocr_engine = ocr_engine
+
+    def detect_only(self, img: np.ndarray, **kwargs):
+        """
+        Only detect the texts from the input image.
+        Args:
+            img (np.ndarray): RGB image with shape: (height, width, 3)
+            kwargs: more configs
+
+        Returns:
+            Dict[str, List[dict]]: The dictionary contains the following keys:
+                * 'detected_texts': list, each element stores the information of a detected box, recorded in a dictionary, including the following values:
+                    'position': The rectangular box corresponding to the detected text; np.ndarray, shape: (4, 2), representing the coordinates (x, y) of the 4 points of the box;
+
+        Example:
+            {'detected_texts':
+                [{'position': array([[416,  77],
+                       [486,  13],
+                       [800, 325],
+                       [730, 390]], dtype=int32),
+                 },
+                 ...
+                ]
+            }
+        """
+        raise NotImplementedError()
+
+    def recognize_only(self, img: np.ndarray, **kwargs):
+        """
+        Only recognize the texts for cropped images, which are from bboxes detected by detect_only.
+        Args:
+            img (): RGB image with shape [height, width] or [height, width, channel].
+                channel should be 1 (gray image) or 3 (RGB formatted color image). scaled in [0, 255];
+            kwargs: more configs
+
+        Returns:
+            dict, with keys:
+                - 'text' (str): The recognized text
+                - 'score' (float): The score of the recognition result (confidence level), ranging from `[0, 1]`; the higher the score, the more reliable it is
+
+        Example:
+            ```
+            {'score': 0.8812797665596008,
+             'text': 'Current Line'}
+            ```
+        """
+        raise NotImplementedError()
+
+    def ocr(self, img: np.ndarray, rec_config: Optional[dict] = None, **kwargs):
+        """
+        Detect texts first, and then recognize the texts for detected bbox patches.
+        Args:
+            img (np.ndarray): RGB image with shape [height, width] or [height, width, channel].
+                channel should be 1 (gray image) or 3 (RGB formatted color image). scaled in [0, 255];
+            rec_config (Optional[dict]): The config for recognition
+            kwargs: more configs
+
+        Returns:
+            list of detected texts, in which each element is a dict, with keys:
+                - 'text' (str): The recognized text
+                - 'score' (float): The score of the recognition result (confidence level), ranging from `[0, 1]`; the higher the score, the more reliable it is
+                - 'position' (np.ndarray): 4 x 2 array, representing the coordinates (x, y) of the 4 points of the box
+
+        Example:
+            ```
+            [{'score': 0.88,
+              'text': 'Line 1',
+              'position': array([[146,  22],
+                     [179,  22],
+                     [179,  60],
+                     [146,  60]], dtype=int32)
+             },
+             {'score': 0.78,
+              'text': 'Line 2',
+              'position': array([[ 641,  115],
+                     [1180,  115],
+                     [1180,  244],
+                     [ 641,  244]], dtype=int32)
+             }]
+            ```
+        """
+        raise NotImplementedError()
+
+
+class CnOCREngine(TextOcrEngine):
+    name = 'cnocr'
+
+    def detect_only(self, img: np.ndarray, **kwargs):
+        outs = self.ocr_engine.det_model.detect(img, **kwargs)
+        for out in outs['detected_texts']:
+            out['position'] = out.pop('box')
+        return outs
+
+    def recognize_only(self, img: np.ndarray, **kwargs):
+        try:
+            return self.ocr_engine.ocr_for_single_line(img)
+        except Exception:
+            return {'text': '', 'score': 0.0}
+
+    def ocr(
+        self, img: np.ndarray, rec_config: Optional[dict] = None, **kwargs
+    ) -> List[dict]:
+        rec_config = rec_config or {}
+        outs = self.ocr_engine.ocr(img, **rec_config)
+        return outs
+
+
+class EasyOCREngine(TextOcrEngine):
+    name = 'easyocr'
+
+    def detect_only(self, img: np.ndarray, **kwargs):
+        if 'resized_shape' in kwargs:
+            kwargs.pop('resized_shape')
+        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+        height, width = img.shape[:2]
+        horizontal_list, free_list = self.ocr_engine.detect(img, **kwargs)
+        horizontal_list, free_list = horizontal_list[0], free_list[0]
+        bboxes = []
+        for x1x2_y1y2 in horizontal_list:
+            xmin, xmax, ymin, ymax = x1x2_y1y2
+            xmin = clip(xmin, 0, width)
+            xmax = clip(xmax, 0, width)
+            ymin = clip(ymin, 0, height)
+            ymax = clip(ymax, 0, height)
+            box = np.array([[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]])
+            bboxes.append({'position': box})
+        for bbox in free_list:
+            if bbox:
+                bboxes.append({'position': np.array(bbox)})
+        return {'detected_texts': bboxes}
+
+    def recognize_only(self, img: np.ndarray, **kwargs) -> dict:
+        out = {'text': '', 'score': 0.0}
+        try:
+            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+            result = self.ocr_engine.recognize(img, **kwargs)
+            if result:
+                out = {'text': result[0][1], 'score': result[0][2]}
+        except Exception:
+            pass
+        return out
+
+    def ocr(
+        self, img: np.ndarray, rec_config: Optional[dict] = None, **kwargs
+    ) -> List[dict]:
+        rec_config = rec_config or {}
+        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+        results = self.ocr_engine.readtext(img, **rec_config)
+        outs = []
+        for result in results:
+            outs.append(
+                {'text': result[1], 'score': result[2], 'position': np.array(result[0])}
+            )
+        return outs
+
+
+def prepare_ocr_engine(languages: Sequence[str], ocr_engine_config):
+    ocr_engine_config = custom_deepcopy(ocr_engine_config) if ocr_engine_config else {}
+    if len(set(languages).difference({'en', 'ch_sim'})) == 0:
+        from cnocr import CnOcr
+
+        # if 'ch_sim' not in languages and 'cand_alphabet' not in ocr_engine_config:  # only recognize english characters
+        #     ocr_engine_config['cand_alphabet'] = list(string.printable) + ['']
+        if tuple(languages) == ('en',):  # only recognize english characters
+            if 'det_model_name' not in ocr_engine_config:
+                ocr_engine_config['det_model_name'] = 'en_PP-OCRv3_det'
+            if 'rec_model_name' not in ocr_engine_config:
+                ocr_engine_config['rec_model_name'] = 'en_PP-OCRv3'
+        ocr_engine = CnOcr(**ocr_engine_config)
+        engine_wrapper = CnOCREngine(languages, ocr_engine)
+    else:
+        try:
+            from easyocr import Reader
+        except ImportError:
+            raise ImportError('Please install easyocr first: pip install easyocr')
+        gpu = False
+        if 'context' in ocr_engine_config:
+            context = ocr_engine_config.pop('context').lower()
+            gpu = 'gpu' in context or 'cuda' in context
+        ocr_engine = Reader(lang_list=list(languages), gpu=gpu, **ocr_engine_config)
+        engine_wrapper = EasyOCREngine(languages, ocr_engine)
+    return engine_wrapper
diff --git a/pix2text/page_elements.py b/pix2text/page_elements.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e1c3dbc1ec4b63b331e951cba2b77f10f0f24a9
--- /dev/null
+++ b/pix2text/page_elements.py
@@ -0,0 +1,405 @@
+# coding: utf-8
+import dataclasses
+from copy import deepcopy
+from pathlib import Path
+import re
+from typing import Sequence, Any, Union, Optional
+
+from PIL import Image
+
+from .table_ocr import visualize_cells
+from .utils import merge_line_texts, smart_join, y_overlap, list2box
+from .layout_parser import ElementType
+
+# matches formula tags such as '(1.2)', with ASCII or full-width parentheses
+FORMULA_TAG = r'^[((]\d+(\.\d+)*[))]$'
+
+
+@dataclasses.dataclass
+class Element(object):
+    id: str
+    box: Sequence
+    text: Optional[str]
+    meta: Any
+    type: ElementType
+    total_img: Image.Image
+    isolated: bool = False
+    col_number: int = -1
+    score: float = 1.0
+    spellchecker = None
+    kwargs: dict = dataclasses.field(default_factory=dict)
+
+    def __init__(
+        self,
+        *,
+        id: str,
+        box: Sequence,
+        isolated: bool,
+        col_number: int,
+        meta: Any,
+        type: ElementType,
+        total_img: Image.Image,
+        score: float,
+        text: Optional[str] = None,
+        spellchecker=None,
+        configs: Optional[dict] = None,
+    ):
+        self.total_img = total_img
+        self.id = id
+        self.box = box
+        self.isolated = isolated
+        self.col_number = col_number
+        self.meta = meta
+        self.type = type
+        self.score = score
+        self.spellchecker = spellchecker
+        self.kwargs = configs or {}
+
+        if self.meta is not None and text is None:
+            self.text = self._meta_to_text()
+        else:
+            self.text = text
+
+        if self.isolated:
+            self.text = self.text + '\n'
+
+    def to_dict(self):
+        return dataclasses.asdict(self)
+
+    def _meta_to_text(self) -> str:
+        if self.type in (ElementType.TEXT, ElementType.TITLE):
+            embed_sep = self.kwargs.get('embed_sep', (' $', '$ '))
+            isolated_sep = self.kwargs.get('isolated_sep', ('$$\n', '\n$$'))
+            line_sep = self.kwargs.get('line_sep', '\n')
+            auto_line_break = self.kwargs.get('auto_line_break', True)
+            if self.type == ElementType.TITLE:
+                for box_info in self.meta:
+                    if box_info.get('type', 'text') == 'isolated':
+                        box_info['type'] = 'embedding'
+            outs = merge_line_texts(
+                self.meta,
+                auto_line_break,
+                line_sep,
+                embed_sep,
+                isolated_sep,
+                self.spellchecker,
+            )
+            if self.type == ElementType.TITLE:
+                outs = smart_join(outs.split('\n'), self.spellchecker)
+        elif self.type == ElementType.FORMULA:
+            if isinstance(self.meta, dict):
+                outs = self.meta['text']
+            elif isinstance(self.meta, list):
+                outs = [one['text'] for one in self.meta]
+        elif self.type == ElementType.TABLE:
+            outs = '\n'.join(self.meta.get('markdown', []))
+        else:
+            outs = ''
+
+        return outs
+
+    def __repr__(self) -> str:
+        return f"Element({self.to_dict()})"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __lt__(self, other) -> bool:
+        """
+        Adapted from https://github.com/SVJLucas/Scanipy/blob/main/scanipy/elements/element.py.
+        Less than operator for Element objects.
+
+        Args:
+            other (Element): Another Element object.
+
+        Returns:
+            bool: True if this Element is "less than" the other, False otherwise.
+
+        Raises:
+            TypeError: If 'other' is not an instance or subclass of Element.
+        """
+
+        if not isinstance(other, Element):
+            raise TypeError("other must be an instance or subclass of Element")
+
+        return self._column_before(other) or (
+            self._same_column(other) and (self.box[1] < other.box[1])
+        )
+
+    def _column_before(self, other) -> bool:
+        """
+        Adapted from https://github.com/SVJLucas/Scanipy/blob/main/scanipy/elements/element.py.
+        Check if this Element is in a column before the other Element.
+
+        Args:
+            other (Element): Another Element object.
+
+        Returns:
+            bool: True if in a column before, False otherwise.
+        """
+
+        if not isinstance(other, Element):
+            raise TypeError("other must be an instance or subclass of Element")
+
+        return self.col_number < other.col_number
+        # max_width = max(box_width(self.box), box_width(other.box))
+        # return self.box[0] < other.box[0] - max_width / 2
+
+    def _same_column(self, other) -> bool:
+        """
+        Check if this Element is in the same column as the other Element.
+
+        Args:
+            other (Element): Another Element object.
+
+        Returns:
+            bool: True if in the same column, False otherwise.
+        """
+
+        if not isinstance(other, Element):
+            raise TypeError("other must be an instance or subclass of Element")
+
+        return self.col_number == other.col_number
+        # max_width = max(box_width(self.box), box_width(other.box))
+        # return abs(self.box[0] - other.box[0]) < max_width / 2
+
+
+def box_width(box):
+    return box[2] - box[0]
+
+
+class Page(object):
+    number: int
+    id: str
+    elements: Sequence[Element]
+    config: dict
+    spellchecker = None
+
+    def __init__(
+        self,
+        *,
+        number: int,
+        elements: Sequence[Element],
+        id: Optional[str] = None,
+        spellchecker=None,
+        config=None,
+    ):
+        self.number = number
+        self.id = id or str(number)
+        self.elements = elements
+        self.spellchecker = spellchecker
+        self.config = config or {}
+
+    def __repr__(self) -> str:
+        return f"Page(id={self.id}, number={self.number}, elements={self.elements})"
+
+    def to_markdown(
+        self,
+        out_dir: Union[str, Path],
+        root_url: Optional[str] = None,
+        markdown_fn: Optional[str] = 'output.md',
+    ) -> str:
+        """
+        Convert the Page to markdown.
+        Args:
+            out_dir (Union[str, Path]): The output directory.
+            root_url (Optional[str]): The root url for the saved images in the markdown files.
+            markdown_fn (Optional[str]): The markdown file name. Default is 'output.md'.
+
+        Returns: The markdown string.
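+
+        Example (a sketch; `page` is a recognized `Page` object and the paths are placeholders):
+
+            ```python
+            md_text = page.to_markdown('output-dir')  # also writes output-dir/output.md
+            ```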
+ + """ + out_dir = Path(out_dir) + out_dir.mkdir(exist_ok=True, parents=True) + self.elements = self._merge_isolated_formula_and_tag(self.elements) + self.elements.sort() + if not self.elements: + return '' + md_out = self._ele_to_markdown(self.elements[0], root_url, out_dir) + for idx, element in enumerate(self.elements[1:]): + prev_element = self.elements[idx] + cur_txt = self._ele_to_markdown(element, root_url, out_dir) + if ( + prev_element.col_number + 1 == element.col_number + and prev_element.type == element.type + and prev_element.type in (ElementType.TEXT, ElementType.TITLE) + and md_out + and md_out[-1] != '\n' + and cur_txt + and cur_txt[0] != '\n' + ): + # column continuation + md_out = smart_join([md_out, cur_txt], self.spellchecker) + else: + md_out += '\n\n' + cur_txt + + line_sep = '\n' + md_out = re.sub( + rf'{line_sep}{{2,}}', f'{line_sep}{line_sep}', md_out + ) # 把2个以上的连续 '\n' 替换为 '\n\n' + if markdown_fn: + with open(out_dir / markdown_fn, 'w', encoding='utf-8') as f: + f.write(md_out) + return md_out + + def _ele_to_markdown( + self, element: Element, root_url: Optional[str], out_dir: Union[str, Path] + ): + type = element.type + text = element.text + if type in (ElementType.TEXT, ElementType.TABLE): + if type == ElementType.TABLE: + visualize_cells( + element.total_img.crop(element.box), + element.meta['cells'][0], + out_dir / f'{element.id}.png', + ) + return text + elif type == ElementType.TITLE: + return f'## {text}' + elif type == ElementType.FORMULA: + isolated_sep = self.config.get('isolated_sep', ('$$\n', '\n$$')) + return isolated_sep[0] + text.strip() + isolated_sep[1] + elif type == ElementType.FIGURE: + out_figure_dir = out_dir / 'figures' + out_figure_dir.mkdir(exist_ok=True, parents=True) + out_path = out_figure_dir / f'{element.id}-{type.name}.jpg' + element.total_img.crop(element.box).save(str(out_path)) + + _url = self._map_path_to_url(root_url, out_path, out_dir) + return f'![]({_url})' + return '' + + def _map_path_to_url(self, root_url: Optional[str], path: Path, out_dir: Path): + rel_url = path.relative_to(out_dir) + if root_url is not None: + return f'{root_url}/{rel_url}' + return str(rel_url) + + @classmethod + def _merge_isolated_formula_and_tag(cls, elements): + # 合并孤立的公式和公式标题 + # 对于每个公式标题,找到与它在同一行且在其左侧距离最近的孤立公式,并把它们合并 + isolated_formula = [ + item + for item in elements + if item.type == ElementType.FORMULA and item.isolated + ] + formula_caption = [ + item + for item in elements + if item.type == ElementType.TEXT and re.match(FORMULA_TAG, item.text) + ] + ele_ids = set([item.id for item in isolated_formula + formula_caption]) + remaining_elements = [item for item in elements if item.id not in ele_ids] + for caption in formula_caption: + caption_xmin, caption_ymin, caption_xmax, caption_ymax = caption.box + min_dist = float('inf') + nearest_formula = None + for formula in isolated_formula: + formula_xmin, formula_ymin, formula_xmax, formula_ymax = formula.box + if ( + caption.col_number == formula.col_number + and y_overlap( + list2box(*caption.box), list2box(*formula.box), key=None + ) + >= 0.8 + ): + dist = caption_xmin - formula_xmax + if 0 <= dist < min_dist: + min_dist = dist + nearest_formula = formula + if nearest_formula is not None: + new_formula = deepcopy(nearest_formula) + formula_xmin, formula_ymin, formula_xmax, formula_ymax = new_formula.box + new_formula.box = [ + min(caption_xmin, formula_xmin), + min(caption_ymin, formula_ymin), + max(caption_xmax, formula_xmax), + max(caption_ymax, formula_ymax), + ] + new_text = 
+                new_text = new_formula.text.strip() + ' \\tag{{{}}}'.format(
+                    caption.text[1:-1]
+                )
+                new_formula.text = new_text
+                if new_formula.meta and isinstance(new_formula.meta, dict):
+                    new_formula.meta['text'] = new_text
+                remaining_elements.append(new_formula)
+                isolated_formula.remove(nearest_formula)
+            else:  # no matching formula found
+                remaining_elements.append(caption)
+        return remaining_elements + isolated_formula
+
+
+class Document(object):
+    number: int
+    id: str
+    pages: Sequence[Page]
+    config: dict
+    spellchecker = None
+
+    def __init__(
+        self,
+        *,
+        number: int,
+        pages: Sequence[Page],
+        id: Optional[str] = None,
+        spellchecker=None,
+        config=None,
+    ):
+        self.number = number
+        self.id = id or str(number)
+        self.pages = pages
+        self.spellchecker = spellchecker
+        self.config = config or {}
+
+    def __repr__(self) -> str:
+        return f"Document(id={self.id}, number={self.number}, pages={self.pages})"
+
+    def to_markdown(
+        self,
+        out_dir: Union[str, Path],
+        root_url: Optional[str] = None,
+        markdown_fn: Optional[str] = 'output.md',
+    ) -> str:
+        """
+        Convert the Document to markdown.
+        Args:
+            out_dir (Union[str, Path]): The output directory.
+            root_url (Optional[str]): The root url for the saved images in the markdown files.
+            markdown_fn (Optional[str]): The markdown file name. Default is 'output.md'.
+
+        Returns: The markdown string.
+
+        """
+        out_dir = Path(out_dir)
+        out_dir.mkdir(exist_ok=True, parents=True)
+        self.pages.sort(key=lambda page: page.number)
+        if not self.pages:
+            return ''
+        md_out = self.pages[0].to_markdown(out_dir, root_url=root_url, markdown_fn=None)
+        for idx, page in enumerate(self.pages[1:]):
+            prev_page = self.pages[idx]
+            cur_txt = page.to_markdown(out_dir, root_url=root_url, markdown_fn=None)
+            if (
+                md_out
+                and prev_page.elements
+                and prev_page.elements[-1].type in (ElementType.TEXT, ElementType.TITLE)
+                and page.elements
+                and page.elements[0].type in (ElementType.TEXT, ElementType.TITLE)
+                and md_out[-1] != '\n'
+                and cur_txt
+                and cur_txt[0] != '\n'
+            ):
+                # column continuation
+                md_out = smart_join([md_out, cur_txt], self.spellchecker)
+            else:
+                md_out += '\n\n' + cur_txt
+
+        line_sep = '\n'
+        md_out = re.sub(
+            rf'{line_sep}{{2,}}', f'{line_sep}{line_sep}', md_out
+        )  # collapse runs of two or more '\n' into '\n\n'
+        if markdown_fn:
+            with open(out_dir / markdown_fn, 'w', encoding='utf-8') as f:
+                f.write(md_out)
+        return md_out
diff --git a/pix2text/pix_to_text.py b/pix2text/pix_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cd995022d7056124cedd50e25fc12b2d774d9d7
--- /dev/null
+++ b/pix2text/pix_to_text.py
@@ -0,0 +1,697 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
+import logging +import io +import os +from copy import deepcopy +from functools import cmp_to_key +from pathlib import Path +from typing import Dict, Any, Optional, Union, List, Literal + +import numpy as np +from PIL import Image +import fitz # PyMuPDF + +from .utils import ( + select_device, + box2list, + read_img, + add_img_margin, + get_background_color, + x_overlap, + list2box, + merge_line_texts, +) +from .layout_parser import LayoutParser, ElementType +# from .layoutlmv3 import LayoutLMv3LayoutParser +from .text_formula_ocr import TextFormulaOCR +from .table_ocr import TableOCR +from .page_elements import Element, Page, Document + +logger = logging.getLogger(__name__) + + +def prepare_layout_engine(layout_config: Optional[Dict[str, Any]], device: str = None) -> LayoutParser: + layout_config = layout_config or {} + kls = layout_config.pop('model_type', 'DocYoloLayoutParser') + if kls == 'DocXLayoutParser': + from .doc_xl_layout import DocXLayoutParser + + layout_engine = DocXLayoutParser.from_config(layout_config, device=device) + # elif kls == 'LayoutLMv3LayoutParser': + # layout_engine = LayoutLMv3LayoutParser.from_config(layout_config, device=device) + elif kls == 'DocYoloLayoutParser': + from .doc_yolo_layout_parser import DocYoloLayoutParser + + layout_engine = DocYoloLayoutParser.from_config(layout_config, device=device) + else: + raise ValueError(f'Unsupported layout parser: {kls}') + return layout_engine + + +def prepare_text_formula_ocr_engine( + text_formula_config, enable_formula: bool, device: str, **kwargs +): + text_formula_config = text_formula_config or {} + kls = text_formula_config.pop('model_type', 'TextFormulaOCR') + if kls == 'TextFormulaOCR': + text_formula_ocr = TextFormulaOCR.from_config( + text_formula_config, enable_formula=enable_formula, device=device, **kwargs + ) + elif kls == 'VlmTextFormulaOCR': + from .text_formula_ocr import VlmTextFormulaOCR + + assert "model_name" in text_formula_config, "VlmTextFormulaOCR requires model_name" + assert "api_key" in text_formula_config, "VlmTextFormulaOCR requires api_key" + text_formula_ocr = VlmTextFormulaOCR.from_config( + model_name=text_formula_config["model_name"], + api_key=text_formula_config["api_key"], + **kwargs, + ) + else: + raise ValueError(f'Unsupported text formula OCR: {kls}') + return text_formula_ocr + + +def prepare_table_ocr_engine( + table_config, + device: str, + text_formula_ocr: Optional[TextFormulaOCR] = None, + **kwargs, +): + table_config = table_config or {} + kls = table_config.pop('model_type', 'TableOCR') + if kls == 'TableOCR': + if text_formula_ocr is None: + raise ValueError("text_formula_ocr must be provided for table OCR engine preparation.") + + table_ocr = TableOCR.from_config( + text_formula_ocr.text_ocr, + text_formula_ocr.spellchecker, + table_config, + device=device, + ) + elif kls == 'VlmTableOCR': + from .vlm_table_ocr import VlmTableOCR + + assert "model_name" in table_config, "VlmTableOCR requires model_name" + assert "api_key" in table_config, "VlmTableOCR requires api_key" + table_ocr = VlmTableOCR.from_config( + model_name=table_config["model_name"], + api_key=table_config["api_key"], + **kwargs, + ) + else: + raise ValueError(f'Unsupported table OCR: {kls}') + return table_ocr + + +class Pix2Text(object): + # MODEL_FILE_PREFIX = 'pix2text-v{}'.format(MODEL_VERSION) + + def __init__( + self, + *, + layout_parser: Optional[LayoutParser] = None, + text_formula_ocr: Optional[TextFormulaOCR] = None, + table_ocr: Optional[TableOCR] = None, + enable_formula: bool = True, 
+ **kwargs, + ): + """ + Initialize the Pix2Text object. + Args: + layout_parser (LayoutParser): The layout parser object; default value is `None`, which means to create a default one + text_formula_ocr (TextFormulaOCR): The text and formula OCR object; default value is `None`, which means to create a default one + table_ocr (TableOCR): The table OCR object; default value is `None`, which means not to recognize tables + enable_formula (bool): Whether to enable formula recognition; default value is `True` + **kwargs (dict): Other arguments, currently not used + """ + if layout_parser is None: + device = select_device(None) + layout_parser = prepare_layout_engine({}, device=device) + if text_formula_ocr is None: + device = select_device(None) + text_formula_ocr = TextFormulaOCR.from_config( + None, enable_formula=enable_formula, device=device + ) + self.layout_parser = layout_parser + self.text_formula_ocr = text_formula_ocr + self.table_ocr = table_ocr + self.enable_formula = enable_formula + + @classmethod + def from_config( + cls, + total_configs: Optional[dict] = None, + enable_formula: bool = True, + enable_table: bool = True, + device: str = None, + **kwargs, + ): + """ + Create a Pix2Text object from the configuration. + Args: + total_configs (dict): The total configuration; default value is `None`, which means to use the default configuration. + If not None, it should contain the following keys: + + * `layout`: The layout parser configuration + * `text_formula`: The TextFormulaOCR configuration + * `table`: The table OCR configuration + enable_formula (bool): Whether to enable formula recognition; default value is `True` + enable_table (bool): Whether to enable table recognition; default value is `True` + device (str): The device to run the model; optional values are 'cpu', 'gpu' or 'cuda'; + default value is `None`, which means to select the device automatically + **kwargs (dict): Other arguments + + Returns: a Pix2Text object + + """ + total_configs = total_configs or {} + layout_config = total_configs.get('layout', None) + text_formula_config = total_configs.get('text_formula', None) + table_config = total_configs.get('table', None) + + layout_parser = prepare_layout_engine(layout_config, device=device) + text_formula_ocr = prepare_text_formula_ocr_engine( + text_formula_config, enable_formula, device, **kwargs + ) + if enable_table: + table_ocr = prepare_table_ocr_engine( + table_config, + device=device, + text_formula_ocr=text_formula_ocr, + **kwargs, + ) + else: + table_ocr = None + + return cls( + layout_parser=layout_parser, + text_formula_ocr=text_formula_ocr, + table_ocr=table_ocr, + enable_formula=enable_formula, + **kwargs, + ) + + def __call__(self, img: Union[str, Path, Image.Image], **kwargs) -> Page: + return self.recognize_page(img, page_id='0', **kwargs) + + def recognize( + self, + img: Union[str, Path, Image.Image], + file_type: Literal[ + 'pdf', 'page', 'text_formula', 'formula', 'text' + ] = 'text_formula', + **kwargs, + ) -> Union[Document, Page, str, List[str], List[Any], List[List[Any]]]: + """ + Recognize the content of the image or pdf file according to the specified type. + It will call the corresponding recognition function `.recognize_{img_type}()` according to the `img_type`. 
+ Args: + img (Union[str, Path, Image.Image]): The image/pdf file path or `Image.Image` object + file_type (str): Supported file types: 'pdf', 'page', 'text_formula', 'formula', 'text' + **kwargs (dict): Arguments for the corresponding recognition function + + Returns: recognized results + + """ + rec_func = getattr(self, f'recognize_{file_type}', None) + if rec_func is None: + raise ValueError(f'Unsupported file type: {file_type}') + return rec_func(img, **kwargs) + + def recognize_pdf( + self, + pdf_fp: Union[str, Path], + pdf_number: int = 0, + pdf_id: Optional[str] = None, + page_numbers: Optional[List[int]] = None, + **kwargs, + ) -> Document: + """ + recognize a pdf file + Args: + pdf_fp (Union[str, Path]): pdf file path + pdf_number (int): pdf number + pdf_id (str): pdf id + page_numbers (List[int]): page numbers to recognize; default is `None`, which means to recognize all pages. + Numbers started from 0 (the first page). + kwargs (dict): Optional keyword arguments. The same as `recognize_page` + + Returns: a Document object. Use `doc.to_markdown('output-dir')` to get the markdown output of the recognized document. + + """ + pdf_id = pdf_id or str(pdf_number) + + doc = fitz.open(pdf_fp, filetype='pdf') + if page_numbers is None: + page_numbers = list(range(len(doc))) + outs = [] + for page_num in range(len(doc)): + if page_num not in page_numbers: + continue + page = doc.load_page(page_num) + # convert to image + pix = page.get_pixmap(dpi=300) + # convert the pixmap to bytes + img_data = pix.tobytes(output='jpg', jpg_quality=200) + # Create a PIL Image from the raw image data + image = Image.open(io.BytesIO(img_data)).convert('RGB') + page_id = str(page_num) + page_kwargs = deepcopy(kwargs) + if kwargs.get('save_debug_res'): + page_kwargs['save_debug_res'] = os.path.join( + kwargs['save_debug_res'], f'{pdf_id}-{page_id}' + ) + outs.append( + self.recognize_page( + image, page_number=page_num, page_id=page_id, **page_kwargs + ) + ) + return Document( + number=pdf_number, + id=pdf_id, + pages=outs, + spellchecker=self.text_formula_ocr.spellchecker, + config=kwargs, + ) + + def recognize_page( + self, + img: Union[str, Path, Image.Image], + page_number: int = 0, + page_id: Optional[str] = None, + **kwargs, + ) -> Page: + """ + Analyze the layout of the image, and then recognize the information contained in each section. + + Args: + img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()` + page_number (str): page number; default value is `0` + page_id (str): page id; default value is `None`, which means to use the `str(page_number)` + kwargs (): + * resized_shape (int): Resize the image width to this size for processing; default value is `768` + * mfr_batch_size (int): batch size for MFR; When running on GPU, this value is suggested to be set to greater than 1; default value is `1` + * embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')` + * isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is two-dollar signs + * line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break + * auto_line_break (bool): Automatically line break the recognized text; only effective when `return_text` is `True`; default value is `True` + * det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. 
This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.3` + * det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below relative to the original bbox height; default value is `0.2` + * embed_ratio_threshold (float): The overlap threshold for embed formulas and text lines; default value is `0.6`. + When the overlap between an embed formula and a text line is greater than or equal to this threshold, + the embed formula and the text line are considered to be on the same line; + otherwise, they are considered to be on different lines. + * table_as_image (bool): If `True`, the table will be recognized as an image (don't parse the table content as text) ; default value is `False` + * title_contain_formula (bool): If `True`, the title of the page will be recognized as a mixed image (text and formula). If `False`, it will be recognized as a text; default value is `False` + * text_contain_formula (bool): If `True`, the text of the page will be recognized as a mixed image (text and formula). If `False`, it will be recognized as a text; default value is `True` + * formula_rec_kwargs (dict): generation arguments passed to formula recognizer `latex_ocr`; default value is `{}` + * save_debug_res (str): if `save_debug_res` is set, the directory to save the debug results; default value is `None`, which means not to save + + Returns: a Page object. Use `page.to_markdown('output-dir')` to get the markdown output of the recognized page. + """ + if isinstance(img, Image.Image): + img0 = img.convert('RGB') + else: + img0 = read_img(img, return_type='Image') + + page_id = page_id or str(page_number) + kwargs['embed_sep'] = kwargs.get('embed_sep', (' $', '$ ')) + kwargs['isolated_sep'] = kwargs.get('isolated_sep', ('$$\n', '\n$$')) + kwargs['line_sep'] = kwargs.get('line_sep', '\n') + kwargs['auto_line_break'] = kwargs.get('auto_line_break', True) + kwargs['title_contain_formula'] = kwargs.get('title_contain_formula', False) + kwargs['text_contain_formula'] = kwargs.get('text_contain_formula', True) + resized_shape = kwargs.get('resized_shape', 768) + kwargs['resized_shape'] = resized_shape + layout_kwargs = deepcopy(kwargs) + layout_kwargs['resized_shape'] = resized_shape + layout_kwargs['table_as_image'] = kwargs.get('table_as_image', False) + if self.table_ocr is None: + layout_kwargs['table_as_image'] = True + layout_out, column_meta = self.layout_parser.parse( + img0.copy(), **layout_kwargs, + ) + + debug_dir = None + if kwargs.get('save_debug_res', None): + debug_dir = Path(kwargs.get('save_debug_res')) + debug_dir.mkdir(exist_ok=True, parents=True) + + outs = [] + for _id, box_info in enumerate(layout_out): + image_type = box_info['type'] + if image_type == ElementType.IGNORED: + continue + box = box2list(box_info['position']) + crop_patch = img0.crop(box) + crop_width, _ = crop_patch.size + score = 1.0 + if not self.enable_formula and image_type == ElementType.FORMULA: + image_type = ElementType.TEXT + if image_type in (ElementType.TEXT, ElementType.TITLE, ElementType.PLAIN_TEXT): + _resized_shape = resized_shape + while crop_width > 1.5 * _resized_shape and _resized_shape < 2048: + _resized_shape = min(int(1.5 * _resized_shape), 2048) + padding_patch = add_img_margin( + crop_patch, left_right_margin=30, top_bottom_margin=30 + ) + text_formula_kwargs = deepcopy(kwargs) + text_formula_kwargs['resized_shape'] = _resized_shape + 
text_formula_kwargs['save_analysis_res'] = (
+                    debug_dir / f'{_id}-{image_type.name}.png' if debug_dir else None
+                )
+                if image_type == ElementType.TITLE:
+                    text_formula_kwargs['contain_formula'] = kwargs[
+                        'title_contain_formula'
+                    ]
+                elif image_type == ElementType.TEXT:
+                    text_formula_kwargs['contain_formula'] = kwargs[
+                        'text_contain_formula'
+                    ]
+                elif image_type == ElementType.PLAIN_TEXT:
+                    text_formula_kwargs['contain_formula'] = False
+                    image_type = ElementType.TEXT
+
+                text_formula_kwargs['return_text'] = False
+                _out = self.text_formula_ocr.recognize(
+                    padding_patch, **text_formula_kwargs,
+                )
+                text, meta = None, _out
+                score = float(np.mean([x['score'] for x in _out]))
+            elif image_type == ElementType.TABLE:
+                xmin, ymin, xmax, ymax = box
+                img_width, img_height = img0.size
+                table_expansion_margin = 10
+                xmin, ymin = (
+                    max(0, xmin - table_expansion_margin),
+                    max(0, ymin - table_expansion_margin),
+                )
+                xmax, ymax = (
+                    min(img_width, xmax + table_expansion_margin),
+                    min(img_height, ymax + table_expansion_margin),
+                )
+                box = (xmin, ymin, xmax, ymax)
+                crop_patch = img0.crop(box)
+                save_analysis_res = (
+                    debug_dir / f'{_id}-{image_type.name}.png' if debug_dir else None
+                )
+                table_kwargs = deepcopy(kwargs)
+                table_kwargs['save_analysis_res'] = save_analysis_res
+                _out = self.table_ocr.recognize(
+                    crop_patch,
+                    out_cells=True,
+                    out_markdown=True,
+                    out_html=True,
+                    **table_kwargs,
+                )
+                text, meta = None, _out
+            elif image_type == ElementType.FORMULA:
+                formula_kwargs = deepcopy(kwargs)
+                formula_kwargs['return_text'] = False
+                _out = self.text_formula_ocr.recognize_formula(
+                    crop_patch, **formula_kwargs
+                )
+                score = _out['score']
+                text, meta = None, _out
+            elif image_type == ElementType.FIGURE:
+                text, meta = '', None
+            else:
+                image_type = ElementType.UNKNOWN
+                text, meta = '', None
+
+            outs.append(
+                Element(
+                    id=f'{page_id}-{_id}',
+                    box=box,
+                    meta=meta,
+                    text=text,
+                    isolated=box_info['isolated'],
+                    col_number=box_info['col_number'],
+                    type=image_type,
+                    score=score,
+                    total_img=img0,
+                    spellchecker=self.text_formula_ocr.spellchecker,
+                    configs=kwargs,
+                )
+            )
+
+        remaining_blocks = self._parse_remaining(
+            img0, layout_out, column_meta, debug_dir, **kwargs
+        )
+        for box_info in remaining_blocks:
+            outs.append(
+                Element(
+                    id=f'{page_id}-{len(outs)}-remaining',
+                    box=box2list(box_info['position']),
+                    meta=None,
+                    text=box_info['text'],
+                    isolated=False,
+                    col_number=box_info['col_number'],
+                    type=ElementType.TEXT
+                    if box_info['type'] != 'isolated'
+                    else ElementType.FORMULA,
+                    score=box_info['score'],
+                    total_img=img0,
+                    spellchecker=self.text_formula_ocr.spellchecker,
+                    configs=kwargs,
+                )
+            )
+        return Page(
+            number=page_number,
+            id=page_id,
+            elements=outs,
+            spellchecker=self.text_formula_ocr.spellchecker,
+            config=kwargs,
+        )
+
+    def _parse_remaining(self, img0, layout_out, column_meta, debug_dir, **kwargs):
+        masked_img = np.array(img0.copy())
+        bg_color = get_background_color(img0)
+        # mask out the regions already parsed by the layout parser, then run OCR on the rest
+        for _box_info in layout_out:
+            xmin, ymin, xmax, ymax = box2list(_box_info['position'])
+            masked_img[ymin:ymax, xmin:xmax, :] = bg_color
+        masked_img = Image.fromarray(masked_img)
+
+        text_formula_kwargs = deepcopy(kwargs)
+        text_formula_kwargs['return_text'] = False
+        save_analysis_res = debug_dir / 'layout-remaining.png' if debug_dir else None
+        text_formula_kwargs['save_analysis_res'] = save_analysis_res
+        _out = self.text_formula_ocr.recognize(masked_img, **text_formula_kwargs,)
+        min_text_length = kwargs.get('min_text_length', 4)
+        _out = [_o for _o in _out if len(_o['text']) >= min_text_length]
+        # guess which column each box belongs to
+        for _box_info in _out:
+            overlap_vals = []
+            for col_number, col_info in column_meta.items():
+                overlap_val = x_overlap(_box_info, col_info, key='position')
+                overlap_vals.append([col_number, overlap_val])
+            if len(overlap_vals) > 0:
+                overlap_vals.sort(key=lambda x: (x[1], x[0]), reverse=True)
+                match_col_number = overlap_vals[0][0]
+            else:
+                match_col_number = 0
+            _box_info['col_number'] = match_col_number
+
+        if len(_out) < 2:
+            return _out
+
+        def _compare(box_info1, box_info2):
+            # a comparator used with `cmp_to_key` must return a negative, zero, or
+            # positive number, not a bool
+            if box_info1['col_number'] != box_info2['col_number']:
+                return box_info1['col_number'] - box_info2['col_number']
+            return box_info1['position'][0, 1] - box_info2['position'][0, 1]
+
+        _out = sorted(_out, key=cmp_to_key(_compare))
+
+        begin_idx = 0
+        end_idx = 1
+
+        new_blocks = []
+        while end_idx <= len(_out):
+            while (
+                end_idx < len(_out)
+                and _out[end_idx]['col_number'] == _out[begin_idx]['col_number']
+            ):
+                end_idx += 1
+            col_outs = _out[begin_idx:end_idx]
+            begin_idx = end_idx
+            end_idx += 1
+            if len(col_outs) < 2:
+                new_blocks.append(col_outs[0])
+            else:
+                new_blocks.extend(
+                    _separate_blocks(
+                        col_outs, self.text_formula_ocr.spellchecker, **kwargs
+                    )
+                )
+
+        return new_blocks
+
+    def recognize_text_formula(
+        self, img: Union[str, Path, Image.Image], return_text: bool = True, **kwargs,
+    ) -> Union[str, List[str], List[Any], List[List[Any]]]:
+        """
+        Analyze the layout of the image, and then recognize the information contained in each section.
+
+        Args:
+            img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()`
+            return_text (bool): Whether to return the recognized text; default value is `True`
+            kwargs ():
+                * resized_shape (int): Resize the image width to this size for processing; default value is `768`
+                * save_analysis_res (str): Save the mfd result image in this file; default is `None`, which means not to save
+                * mfr_batch_size (int): batch size for MFR; When running on GPU, this value is suggested to be set to greater than 1; default value is `1`
+                * embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')`
+                * isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is two-dollar signs
+                * line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break
+                * auto_line_break (bool): Automatically line break the recognized text; only effective when `return_text` is `True`; default value is `True`
+                * det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. This value represents the maximum expansion ratio to the left and right, relative to the original bbox height; default value is `0.3`
+                * det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below, relative to the original bbox height; default value is `0.2`
+                * embed_ratio_threshold (float): The overlap threshold for embed formulas and text lines; default value is `0.6`.
+                    When the overlap between an embed formula and a text line is greater than or equal to this threshold,
+                    the embed formula and the text line are considered to be on the same line;
+                    otherwise, they are considered to be on different lines.
+                * table_as_image (bool): If `True`, the table will be recognized as an image; default value is `False`
+                * formula_rec_kwargs (dict): generation arguments passed to formula recognizer `latex_ocr`; default value is `{}`
+
+        Returns: a str when `return_text` is `True`; or a list of ordered (top to bottom, left to right) dicts when `return_text` is `False`,
+            with each dict representing one detected box, containing keys:
+
+                * `type`: The category of the image; Optional: 'text', 'isolated', 'embedding'
+                * `text`: The recognized text or Latex formula
+                * `score`: The confidence score [0, 1]; the higher, the more confident
+                * `position`: Position information of the block, `np.ndarray`, with shape of [4, 2]
+                * `line_number`: The line number of the box (first line `line_number==0`), boxes with the same value indicate they are on the same line
+
+        """
+        return self.text_formula_ocr.recognize(img, return_text, **kwargs)
+
+    def recognize_text(
+        self,
+        imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+        return_text: bool = True,
+        rec_config: Optional[dict] = None,
+        **kwargs,
+    ) -> Union[str, List[str], List[Any], List[List[Any]]]:
+        """
+        Recognize a pure Text Image.
+        Args:
+            imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+            return_text (bool): Whether to return only the recognized text; default value is `True`
+            rec_config (Optional[dict]): The config for recognition
+            kwargs (): Other parameters for `text_ocr.ocr()`
+
+        Returns: Text str or list of text strs when `return_text` is True;
+            `List[Any]` or `List[List[Any]]` when `return_text` is False, with the same length as `imgs` and the following keys:
+
+                * `position`: Position information of the block, `np.ndarray`, with a shape of [4, 2]
+                * `text`: The recognized text
+                * `score`: The confidence score [0, 1]; the higher, the more confident
+
+        """
+        return self.text_formula_ocr.recognize_text(
+            imgs, return_text, rec_config, **kwargs
+        )
+
+    def recognize_formula(
+        self,
+        imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+        batch_size: int = 1,
+        return_text: bool = True,
+        rec_config: Optional[dict] = None,
+        **kwargs,
+    ) -> Union[str, List[str], Dict[str, Any], List[Dict[str, Any]]]:
+        """
+        Recognize pure Math Formula images to LaTeX Expressions.
+        Args:
+            imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+            batch_size (int): The batch size
+            return_text (bool): Whether to return only the recognized text; default value is `True`
+            rec_config (Optional[dict]): The config for recognition
+            **kwargs (): Special model parameters. Not used for now
+
+        Returns: The LaTeX Expression or list of LaTeX Expressions;
+            str or List[str] when `return_text` is True;
+            Dict[str, Any] or List[Dict[str, Any]] when `return_text` is False, with the following keys:
+
+                * `text`: The recognized LaTeX text
+                * `score`: The confidence score [0, 1]; the higher, the more confident
+
+        """
+        return self.text_formula_ocr.recognize_formula(
+            imgs, batch_size, return_text, rec_config, **kwargs
+        )
+
+
+def _separate_blocks(col_outs, spellchecker, **kwargs):
+    out_blocks = []
+
+    def _merge_lines(cur_block_lines):
+        if len(cur_block_lines) < 2:
+            return cur_block_lines[0]
+        ymin = cur_block_lines[0]['position'][0, 1]
+        ymax = cur_block_lines[-1]['position'][3, 1]
+        xmin = min([_b['position'][0, 0] for _b in cur_block_lines])
+        xmax = max([_b['position'][3, 0] for _b in cur_block_lines])
+        position = list2box(xmin, ymin, xmax, ymax)
+        score = np.mean([_b['score'] for _b in cur_block_lines])
+        col_number = cur_block_lines[0]['col_number']
+        # text = smart_join([_b['text'] for _b in cur_block_lines], spellchecker)
+        text = merge_line_texts(
+            cur_block_lines,
+            auto_line_break=kwargs['auto_line_break'],
+            line_sep=kwargs['line_sep'],
+            embed_sep=kwargs['embed_sep'],
+            isolated_sep=kwargs['isolated_sep'],
+            spellchecker=spellchecker,
+        )
+
+        return {
+            'type': 'text',
+            'text': text,
+            'position': position,
+            'score': score,
+            'col_number': col_number,
+            'line_number': len(out_blocks),
+        }
+
+    cur_block_lines = [col_outs[0]]
+    for _box_info in col_outs[1:]:
+        cur_height = (
+            cur_block_lines[-1]['position'][3, 1]
+            - cur_block_lines[-1]['position'][0, 1]
+        )
+        if (
+            _box_info['position'][0, 1] - cur_block_lines[-1]['position'][3, 1]
+            < cur_height
+        ):
+            # if the gap between the current line and the next line is less than one
+            # line height, consider them part of the same block
+            cur_block_lines.append(_box_info)
+        else:
+            # merge lines
+            merged_line = _merge_lines(cur_block_lines)
+            out_blocks.append(merged_line)
+
+            cur_block_lines = [_box_info]
+
+    if len(cur_block_lines) > 0:
+        merged_line = _merge_lines(cur_block_lines)
+        out_blocks.append(merged_line)
+
+    return out_blocks
+
+
+if __name__ == '__main__':
+    from .utils import set_logger
+
+    logger = set_logger(log_level='DEBUG')
+
+    p2t = Pix2Text()
+    img = 'docs/examples/english.jpg'
+    img = read_img(img, return_type='Image')
+    out = p2t.recognize(img)
+    logger.info(out)
diff --git a/pix2text/render.py b/pix2text/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..e36470bd69210fb2ac86eb9ad1b0012e74c32675
--- /dev/null
+++ b/pix2text/render.py
@@ -0,0 +1,217 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
+
+COLOR_MAPPING = {
+    'general': '#009933',
+    'english': '#3399ff',
+    'formula': '#ff8000',
+    'hybrid': '#009999',
+    'text': '#3399ff',
+    'isolated': '#ff8000',
+    'text-embed': '#009999',
+}
+
+
+def render_html(newest_fp, image_type, text, out_html_fp):
+    # page head: title and MathJax (used to render the recognized LaTeX)
+    html_str = """<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <title>Pix2Text</title>
+  <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
+</head>
+<body>
+  <div>
+    <h2>Pix2Text: a free tool like Mathpix</h2>
+    <h3>Screenshot</h3>
+  </div>
+  <div>
+"""
+    # embed the input screenshot image
+    html_str += fr'    <img src="{newest_fp}" alt="screenshot">' + '\n'
+    html_str += """
+  </div>
+  <div>
+    <h3>Results</h3>
+"""
+
+    # show the image type, colored according to COLOR_MAPPING
+    html_str += r'    <p>Image Type: ' \
+        fr'<span style="color: {COLOR_MAPPING.get(image_type, "#000000")}">' \
+        fr'{image_type}</span></p>' + '\n'
+
+    if image_type in ('formula', 'hybrid'):
+        # render the recognized LaTeX via MathJax
+        html_str += fr'    <div>$${text}$$</div>' + '\n'
+
+    # show the raw recognition result
+    html_str += fr'    <pre>{text}</pre>' + '\n'
+
+    html_str += """
+  </div>
+</body>
+</html>
+"""
+
+    with open(out_html_fp, 'w', encoding='utf-8') as f:
+        f.write(html_str)
+    with open(out_html_fp, 'w', encoding='utf-8') as f:
+        f.writelines(html_str)
diff --git a/pix2text/serve.py b/pix2text/serve.py
new file mode 100644
index 0000000000000000000000000000000000000000..86108ca28c3f6911ddadd02da7159e0cc296b110
--- /dev/null
+++ b/pix2text/serve.py
@@ -0,0 +1,83 @@
+# coding: utf-8
+# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix.
+# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com).
+import time
+from copy import deepcopy
+from pathlib import Path
+from typing import Dict, List, Any, Union, Optional
+
+from pydantic import BaseModel
+from fastapi import FastAPI, UploadFile, Form, HTTPException
+
+from pix2text import set_logger, read_img, Pix2Text
+
+logger = set_logger(log_level='DEBUG')
+
+app = FastAPI()
+
+
+@app.get("/")
+async def root():
+    return {"message": "Welcome to Pix2Text Server!"}
+
+
+class Pix2TextResponse(BaseModel):
+    status_code: int = 200
+    results: Union[str, List[Dict[str, Any]]]
+    output_dir: Optional[str] = None
+
+    def dict(self, **kwargs):
+        the_dict = deepcopy(super().dict())
+        return the_dict
+
+
+@app.post("/pix2text")
+async def ocr(
+    image: UploadFile,
+    file_type: str = Form(default='text_formula'),
+    resized_shape: str = Form(default='768'),
+    embed_sep: str = Form(default=' $,$ '),
+    isolated_sep: str = Form(default='$$\n, \n$$'),
+) -> Dict[str, Any]:
+    # Example curl invocation:
+    # $ curl -F image=@docs/examples/english.jpg --form 'file_type=text_formula' --form 'resized_shape=768' \
+    #     http://0.0.0.0:8503/pix2text
+    global P2T, OUTPUT_MD_ROOT_DIR
+    if file_type not in ('text', 'formula', 'text_formula', 'page'):
+        raise HTTPException(
+            status_code=400,
+            detail='file_type must be one of "text", "formula", "text_formula", "page"',
+        )
+
+    img_file = image.file
+    fn = Path(image.filename)
+    img0 = read_img(img_file, return_type='Image')
+    embed_sep = embed_sep.split(',')
+    isolated_sep = isolated_sep.split(',')
+    # use_analyzer = use_analyzer.lower() != 'false' if isinstance(use_analyzer, str) else use_analyzer
+
+    params = dict(resized_shape=int(resized_shape), return_text=True)
+    if len(embed_sep) == 2:
+        params['embed_sep'] = embed_sep
+    if len(isolated_sep) == 2:
+        params['isolated_sep'] = isolated_sep
+
+    logger.info(f'input {params=}')
+
+    res = P2T.recognize(img0, file_type=file_type, **params)
+    output_dir = None
+    if file_type in ('pdf', 'page'):
+        output_dir = str(OUTPUT_MD_ROOT_DIR / f'{fn.stem}-{time.time()}')
+        res = res.to_markdown(output_dir)
+    logger.info(f'output {res=}')
+
+    return Pix2TextResponse(results=res, output_dir=output_dir).dict()
+
+
+def start_server(
+    p2t_config, output_md_root_dir, host='0.0.0.0', port=8503, reload=False, **kwargs
+):
+    global P2T, OUTPUT_MD_ROOT_DIR
+    OUTPUT_MD_ROOT_DIR = Path(output_md_root_dir)
+    OUTPUT_MD_ROOT_DIR.mkdir(exist_ok=True, parents=True)
+    P2T = Pix2Text.from_config(**p2t_config)
+    import uvicorn
+
+    uvicorn.run(app, host=host, port=port, reload=reload, **kwargs)
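+
+
+# Minimal launch sketch (arguments hypothetical):
+#   start_server(p2t_config={}, output_md_root_dir='outputs-md', port=8503)
+# then, from another shell:
+#   curl -F image=@docs/examples/english.jpg http://0.0.0.0:8503/pix2text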
diff --git a/pix2text/table_ocr.py b/pix2text/table_ocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..5aa380518541f6f307da2409df4d7a9b0f403a31
--- /dev/null
+++ b/pix2text/table_ocr.py
@@ -0,0 +1,1131 @@
+# coding: utf-8
+# Adapted from https://github.com/microsoft/table-transformer/blob/main/src/inference.py
+import os
+import shutil
+from collections import defaultdict, OrderedDict
+from itertools import chain
+from pathlib import Path
+from typing import Union, Optional, Dict, Any
+from copy import deepcopy
+import xml.etree.ElementTree as ET
+
+import torch
+from torchvision import transforms
+from PIL import Image
+from fitz import Rect
+import numpy as np
+import pandas as pd
+from transformers import AutoModelForObjectDetection
+
+# from transformers import TableTransformerForObjectDetection
+
+from .consts import MODEL_VERSION
+from .ocr_engine import TextOcrEngine
+from .utils import (
+    select_device,
+    data_dir,
+    read_img,
+    rotated_box_to_horizontal,
+    is_valid_box,
+    list2box,
+    box2list,
+    sort_boxes,
+    merge_line_texts,
+    prepare_model_files2,
+)
+from . import table_postprocess as postprocess
+
+
+# detection_class_thresholds = {"table": 0.5, "table rotated": 0.5, "no object": 10}
+DEFAULT_STRUCTURE_THRESHOLDS = {
+    "table": 0.5,
+    "table column": 0.5,
+    "table row": 0.5,
+    "table column header": 0.5,
+    "table projected row header": 0.5,
+    "table spanning cell": 0.5,
+    "no object": 10,
+}
+
+
+DEFAULT_CONFIGS = {
+    'model_dir': None,
+    'root': data_dir(),
+    'structure_thresholds': DEFAULT_STRUCTURE_THRESHOLDS,
+    'table_expansion_margin': 10,
+    'threshold_percentage': 0.10,
+}
+
+
+class TableOCR(object):
+    """
+    Represents a table extractor for extracting tables from a document.
+    """
+
+    def __init__(
+        self,
+        text_ocr: TextOcrEngine,
+        spellchecker=None,
+        device: str = None,
+        model_dir: Optional[Union[str, Path]] = None,
+        root: Union[str, Path] = data_dir(),
+        structure_thresholds=None,
+        table_expansion_margin=10,
+        threshold_percentage=0.10,
+        **kwargs,
+    ):
+        """
+        Initialize a TableOCR object.
+
+        Args:
+            text_ocr (TextOcrEngine): the text OCR engine used to read cell contents
+            spellchecker (): optional spellchecker passed to text merging
+            device (str): device for the structure model, e.g. 'cpu' or 'cuda'
+            model_dir (Optional[Union[str, Path]]): local directory of the structure model; downloaded if `None`
+            root (Union[str, Path]): root directory for downloaded model files
+            structure_thresholds (): per-class confidence thresholds for structure detection
+            table_expansion_margin (int): pixels by which to expand table boxes before cropping
+            threshold_percentage (float): fraction of the average row height used as the new-row threshold
+        """
+        self.text_ocr = text_ocr
+        self.spellchecker = spellchecker
+
+        self.str_device = select_device(device)
+        self.str_class_name2idx = get_class_map('structure')
+        self.str_class_idx2name = {v: k for k, v in self.str_class_name2idx.items()}
+        self.str_class_thresholds = structure_thresholds or DEFAULT_STRUCTURE_THRESHOLDS
+
+        if model_dir is None:
+            model_dir = self._prepare_model_files(root, None)
+        # Initialize the model for identifying table structures
+        self.str_model = AutoModelForObjectDetection.from_pretrained(model_dir).to(
+            self.str_device
+        )
+        self.str_model.eval()
+
+        # Expand the bounding box slightly for better cropping
+        self._table_expansion_margin = table_expansion_margin
+
+        # Use a percentage (e.g., 10%) of the average height as the threshold for a new row
+        self._threshold_percentage = threshold_percentage
+        self.test = []
+
+    @classmethod
+    def from_config(
+        cls,
+        text_ocr: TextOcrEngine,
+        spellchecker=None,
+        configs: Optional[dict] = None,
+        device: str = None,
+        **kwargs,
+    ):
+        configs = configs or {}
+        def_configs = deepcopy(DEFAULT_CONFIGS)
+        def_configs.update(configs)
+        configs = def_configs
+        configs['device'] = select_device(device)
+
+        return cls(
+            text_ocr=text_ocr,
+            spellchecker=spellchecker,
+            device=device,
+            model_dir=configs['model_dir'],
+            root=configs['root'],
+            structure_thresholds=configs['structure_thresholds'],
+            table_expansion_margin=configs['table_expansion_margin'],
+            threshold_percentage=configs['threshold_percentage'],
+            **kwargs,
+        )
+
+    def _prepare_model_files(self, root, model_info):
+        model_root_dir = Path(root) / MODEL_VERSION
+        # model_dir = model_root_dir / model_info['local_model_id']
+        model_dir = model_root_dir / 'table-rec'
+        if model_dir.is_dir() and list(model_dir.glob('**/[!.]*')):
+            return model_dir
+        model_dir = prepare_model_files2(
+            model_fp_or_dir=model_dir,
+            remote_repo="breezedeus/pix2text-table-rec",
+            file_or_dir="dir",
+        )
+        return model_dir
+
+    def recognize(
+        self,
+        img,
+        tokens=None,
+        out_objects=False,
+        out_cells=True,
+        out_html=False,
+        out_csv=False,
+        out_markdown=True,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """
+        Recognize the table structure in an image and OCR the cell texts.
+
+        Args:
+            img (): the image to recognize; a file path or a `PIL.Image.Image`
+            tokens (): optional pre-extracted word tokens (each with a 'bbox'); default `None`
+            out_objects (): whether to include the raw detected objects in the output
+            out_cells (): whether to include the resolved table cells in the output
+            out_html (): whether to include an HTML rendering of each table
+            out_csv (): whether to include a CSV rendering of each table
+            out_markdown (): whether to include a Markdown rendering of each table
+            **kwargs ():
+
+                * save_analysis_res (str): Save the parsed result image in this file; default value is `None`, which means not to save
+
+        Returns: a dict keyed by the requested output formats
+            ('objects', 'cells', 'html', 'csv', 'markdown')
+
+        """
+        out_formats = {}
+        if self.str_model is None:
+            print("No structure model loaded.")
+            return out_formats
+
+        # include `out_markdown` here so Markdown-only calls are not rejected
+        if not (out_objects or out_cells or out_html or out_csv or out_markdown):
+            print("No output format specified")
+            return out_formats
+
+        if isinstance(img, (str, Path)):
+            img = read_img(img, return_type='Image')
+        # Transform the image how the model expects it
+        img_tensor = structure_transform(img)
+
+        # Run input image through the model
+        with torch.no_grad():
+            outputs = self.str_model(img_tensor.unsqueeze(0).to(self.str_device))
+
+        # Post-process detected objects, assign class labels
+        objects = outputs_to_objects(outputs, img.size, self.str_class_idx2name)
+        if out_objects:
+            out_formats['objects'] = objects
+        # Markdown output also needs the cells below, so keep going for it too
+        if not (out_cells or out_html or out_csv or out_markdown):
+            return out_formats
+
+        # Further process the detected objects so they correspond to a consistent table
+        tokens = tokens or []
+        tables_structure = objects_to_structures(
+            objects, tokens, self.str_class_thresholds
+        )
+
+        # Enumerate all table cells: grid cells and spanning cells
+        tables_cells = [
+            structure_to_cells(structure, tokens)[0] for structure in tables_structure
+        ]
+        for cells in tables_cells:
+            self._ocr_texts(img, cells)
+        if out_cells:
+            out_formats['cells'] = tables_cells
+        if kwargs.get('save_analysis_res'):
+            visualize_cells(img, tables_cells[0], kwargs['save_analysis_res'])
+
+        if not (out_html or out_csv or out_markdown):
+            return out_formats
+
+        # Convert cells to HTML
+        if out_html:
+            tables_htmls = [cells_to_html(cells) for cells in tables_cells]
+            out_formats['html'] = tables_htmls
+
+        # Convert cells to CSV, including flattening multi-row column headers to a single row
+        if out_csv:
+            tables_csvs = [cells_to_csv(cells) for cells in tables_cells]
+            out_formats['csv'] = tables_csvs
+
+        if out_markdown:
+            tables_mds = [cells_to_markdown(cells) for cells in tables_cells]
+            out_formats['markdown'] = tables_mds
+
+        return out_formats
+
+    def _ocr_texts(self, img: Image.Image, cells):
+        text_box_infos = self.text_ocr.detect_only(np.array(img))
+        box_infos = []
+        for line_box_info in text_box_infos['detected_texts']:
+            _text_box = rotated_box_to_horizontal(line_box_info['position'])
+            if not is_valid_box(_text_box, min_height=8, min_width=2):
+                continue
+            box_infos.append({'position': _text_box})
+        for t_cell in cells:
+            table_box = t_cell['bbox']
+            inner_text_boxes = []
+            for box_info in box_infos:
+                _pos = box_info['position']
+                text_box = [_pos[0][0], _pos[0][1], _pos[2][0], _pos[2][1]]
+                inner_box = list2box(*cut_bbox(table_box, text_box))
+                if is_valid_box(inner_box):
+                    inner_text_boxes.append({'position': inner_box})
+            if inner_text_boxes:
+                for _box_info in inner_text_boxes:
+                    _box = box2list(_box_info['position'])
+                    ocr_out = self.text_ocr.recognize_only(np.array(img.crop(_box)))
+                    _box_info['text'] = ocr_out['text']
+                    _box_info['type'] = 'text'
+                outs = sort_boxes(inner_text_boxes, key='position')
+                t_cell['text_bboxes'] = outs
+                outs = list(chain(*outs))
+                t_cell['cell text'] = merge_line_texts(
+                    outs,
+                    auto_line_break=True,
+                    line_sep=' ',
+                    spellchecker=self.spellchecker,
+                )
+
+
+def cut_bbox(anchor_box, box2):
+    # x1,
y1, x2, y2 + x1 = max(anchor_box[0], box2[0]) + y1 = max(anchor_box[1], box2[1]) + x2 = min(anchor_box[2], box2[2]) + y2 = min(anchor_box[3], box2[3]) + return x1, y1, x2, y2 + + +class MaxResize(object): + def __init__(self, max_size=800): + self.max_size = max_size + + def __call__(self, image): + width, height = image.size + current_max_size = max(width, height) + scale = self.max_size / current_max_size + resized_image = image.resize( + (int(round(scale * width)), int(round(scale * height))) + ) + + return resized_image + + +def get_class_map(data_type): + class_map = {} + if data_type == 'structure': + class_map = { + 'table': 0, + 'table column': 1, + 'table row': 2, + 'table column header': 3, + 'table projected row header': 4, + 'table spanning cell': 5, + 'no object': 6, + } + elif data_type == 'detection': + class_map = {'table': 0, 'table rotated': 1, 'no object': 2} + return class_map + + +detection_transform = transforms.Compose( + [ + MaxResize(800), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) + +structure_transform = transforms.Compose( + [ + MaxResize(1000), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] +) + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=1) + + +def rescale_bboxes(out_bbox, size): + img_w, img_h = size + b = box_cxcywh_to_xyxy(out_bbox) + b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32) + return b + + +def iob(bbox1, bbox2): + """ + Compute the intersection area over box area, for bbox1. + """ + intersection = Rect(bbox1).intersect(bbox2) + + bbox1_area = Rect(bbox1).get_area() + if bbox1_area > 0: + return intersection.get_area() / bbox1_area + + return 0 + + +def align_headers(headers, rows): + """ + Adjust the header boundary to be the convex hull of the rows it intersects + at least 50% of the height of. + + For now, we are not supporting tables with multiple headers, so we need to + eliminate anything besides the top-most header. + """ + + aligned_headers = [] + + for row in rows: + row['column header'] = False + + header_row_nums = [] + for header in headers: + for row_num, row in enumerate(rows): + row_height = row['bbox'][3] - row['bbox'][1] + min_row_overlap = max(row['bbox'][1], header['bbox'][1]) + max_row_overlap = min(row['bbox'][3], header['bbox'][3]) + overlap_height = max_row_overlap - min_row_overlap + if overlap_height / row_height >= 0.5: + header_row_nums.append(row_num) + + if len(header_row_nums) == 0: + return aligned_headers + + header_rect = Rect() + if header_row_nums[0] > 0: + header_row_nums = list(range(header_row_nums[0] + 1)) + header_row_nums + + last_row_num = -1 + for row_num in header_row_nums: + if row_num == last_row_num + 1: + row = rows[row_num] + row['column header'] = True + header_rect = header_rect.include_rect(row['bbox']) + last_row_num = row_num + else: + # Break as soon as a non-header row is encountered. + # This ignores any subsequent rows in the table labeled as a header. + # Having more than 1 header is not supported currently. + break + + header = {'bbox': list(header_rect)} + aligned_headers.append(header) + + return aligned_headers + + +def refine_table_structure(table_structure, class_thresholds): + """ + Apply operations to the detected table structure objects such as + thresholding, NMS, and alignment. 
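+
+    A minimal call sketch (inputs hypothetical; `structure` uses the keys
+    built by `objects_to_structures`, and each spanning cell carries a
+    'projected row header' flag)::
+
+        structure = {'rows': rows, 'columns': columns,
+                     'column headers': headers, 'spanning cells': spans}
+        structure = refine_table_structure(structure, DEFAULT_STRUCTURE_THRESHOLDS)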
+ """ + rows = table_structure["rows"] + columns = table_structure['columns'] + + # Process the headers + column_headers = table_structure['column headers'] + column_headers = postprocess.apply_threshold( + column_headers, class_thresholds["table column header"] + ) + column_headers = postprocess.nms(column_headers) + column_headers = align_headers(column_headers, rows) + + # Process spanning cells + spanning_cells = [ + elem + for elem in table_structure['spanning cells'] + if not elem['projected row header'] + ] + projected_row_headers = [ + elem + for elem in table_structure['spanning cells'] + if elem['projected row header'] + ] + spanning_cells = postprocess.apply_threshold( + spanning_cells, class_thresholds["table spanning cell"] + ) + projected_row_headers = postprocess.apply_threshold( + projected_row_headers, class_thresholds["table projected row header"] + ) + spanning_cells += projected_row_headers + # Align before NMS for spanning cells because alignment brings them into agreement + # with rows and columns first; if spanning cells still overlap after this operation, + # the threshold for NMS can basically be lowered to just above 0 + spanning_cells = postprocess.align_supercells(spanning_cells, rows, columns) + spanning_cells = postprocess.nms_supercells(spanning_cells) + + postprocess.header_supercell_tree(spanning_cells) + + table_structure['columns'] = columns + table_structure['rows'] = rows + table_structure['spanning cells'] = spanning_cells + table_structure['column headers'] = column_headers + + return table_structure + + +def outputs_to_objects(outputs, img_size, class_idx2name): + m = outputs['logits'].softmax(-1).max(-1) + pred_labels = list(m.indices.detach().cpu().numpy())[0] + pred_scores = list(m.values.detach().cpu().numpy())[0] + pred_bboxes = outputs['pred_boxes'].detach().cpu()[0] + pred_bboxes = [elem.tolist() for elem in rescale_bboxes(pred_bboxes, img_size)] + + objects = [] + for label, score, bbox in zip(pred_labels, pred_scores, pred_bboxes): + class_label = class_idx2name[int(label)] + if not class_label == 'no object': + objects.append( + { + 'label': class_label, + 'score': float(score), + 'bbox': [float(elem) for elem in bbox], + } + ) + + return objects + + +def objects_to_crops(img, tokens, objects, class_thresholds, padding=10): + """ + Process the bounding boxes produced by the table detection model into + cropped table images and cropped tokens. 
+ """ + + table_crops = [] + for obj in objects: + if obj['score'] < class_thresholds[obj['label']]: + continue + + cropped_table = {} + + bbox = obj['bbox'] + bbox = [ + bbox[0] - padding, + bbox[1] - padding, + bbox[2] + padding, + bbox[3] + padding, + ] + + cropped_img = img.crop(bbox) + + table_tokens = [token for token in tokens if iob(token['bbox'], bbox) >= 0.5] + for token in table_tokens: + token['bbox'] = [ + token['bbox'][0] - bbox[0], + token['bbox'][1] - bbox[1], + token['bbox'][2] - bbox[0], + token['bbox'][3] - bbox[1], + ] + + # If table is predicted to be rotated, rotate cropped image and tokens/words: + if obj['label'] == 'table rotated': + cropped_img = cropped_img.rotate(270, expand=True) + for token in table_tokens: + bbox = token['bbox'] + bbox = [ + cropped_img.size[0] - bbox[3] - 1, + bbox[0], + cropped_img.size[0] - bbox[1] - 1, + bbox[2], + ] + token['bbox'] = bbox + + cropped_table['image'] = cropped_img + cropped_table['tokens'] = table_tokens + + table_crops.append(cropped_table) + + return table_crops + + +def objects_to_structures(objects, tokens, class_thresholds): + """ + Process the bounding boxes produced by the table structure recognition model into + a *consistent* set of table structures (rows, columns, spanning cells, headers). + This entails resolving conflicts/overlaps, and ensuring the boxes meet certain alignment + conditions (for example: rows should all have the same width, etc.). + """ + + tables = [obj for obj in objects if obj['label'] == 'table'] + table_structures = [] + + for table in tables: + table_objects = [ + obj for obj in objects if iob(obj['bbox'], table['bbox']) >= 0.5 + ] + table_tokens = [ + token for token in tokens if iob(token['bbox'], table['bbox']) >= 0.5 + ] + + structure = {} + + columns = [obj for obj in table_objects if obj['label'] == 'table column'] + rows = [obj for obj in table_objects if obj['label'] == 'table row'] + column_headers = [ + obj for obj in table_objects if obj['label'] == 'table column header' + ] + spanning_cells = [ + obj for obj in table_objects if obj['label'] == 'table spanning cell' + ] + for obj in spanning_cells: + obj['projected row header'] = False + projected_row_headers = [ + obj for obj in table_objects if obj['label'] == 'table projected row header' + ] + for obj in projected_row_headers: + obj['projected row header'] = True + spanning_cells += projected_row_headers + for obj in rows: + obj['column header'] = False + for header_obj in column_headers: + if iob(obj['bbox'], header_obj['bbox']) >= 0.5: + obj['column header'] = True + + # Refine table structures + rows = postprocess.refine_rows( + rows, table_tokens, class_thresholds['table row'] + ) + columns = postprocess.refine_columns( + columns, table_tokens, class_thresholds['table column'] + ) + + # Shrink table bbox to just the total height of the rows + # and the total width of the columns + row_rect = Rect() + for obj in rows: + row_rect.include_rect(obj['bbox']) + column_rect = Rect() + for obj in columns: + column_rect.include_rect(obj['bbox']) + table['row_column_bbox'] = [ + column_rect[0], + row_rect[1], + column_rect[2], + row_rect[3], + ] + table['bbox'] = table['row_column_bbox'] + + # Process the rows and columns into a complete segmented table + columns = postprocess.align_columns(columns, table['row_column_bbox']) + rows = postprocess.align_rows(rows, table['row_column_bbox']) + + structure['rows'] = rows + structure['columns'] = columns + structure['column headers'] = column_headers + structure['spanning cells'] = 
spanning_cells + + if len(rows) > 0 and len(columns) > 1: + structure = refine_table_structure(structure, class_thresholds) + + table_structures.append(structure) + + return table_structures + + +def structure_to_cells(table_structure, tokens): + """ + Assuming the row, column, spanning cell, and header bounding boxes have + been refined into a set of consistent table structures, process these + table structures into table cells. This is a universal representation + format for the table, which can later be exported to Pandas or CSV formats. + Classify the cells as header/access cells or data cells + based on if they intersect with the header bounding box. + """ + columns = table_structure['columns'] + rows = table_structure['rows'] + spanning_cells = table_structure['spanning cells'] + cells = [] + subcells = [] + + # Identify complete cells and subcells + for column_num, column in enumerate(columns): + for row_num, row in enumerate(rows): + column_rect = Rect(list(column['bbox'])) + row_rect = Rect(list(row['bbox'])) + cell_rect = row_rect.intersect(column_rect) + header = 'column header' in row and row['column header'] + cell = { + 'bbox': list(cell_rect), + 'column_nums': [column_num], + 'row_nums': [row_num], + 'column header': header, + } + + cell['subcell'] = False + for spanning_cell in spanning_cells: + spanning_cell_rect = Rect(list(spanning_cell['bbox'])) + if ( + spanning_cell_rect.intersect(cell_rect).get_area() + / cell_rect.get_area() + ) > 0.5: + cell['subcell'] = True + break + + if cell['subcell']: + subcells.append(cell) + else: + # cell text = extract_text_inside_bbox(table_spans, cell['bbox']) + # cell['cell text'] = cell text + cell['projected row header'] = False + cells.append(cell) + + for spanning_cell in spanning_cells: + spanning_cell_rect = Rect(list(spanning_cell['bbox'])) + cell_columns = set() + cell_rows = set() + cell_rect = None + header = True + for subcell in subcells: + subcell_rect = Rect(list(subcell['bbox'])) + subcell_rect_area = subcell_rect.get_area() + if ( + subcell_rect.intersect(spanning_cell_rect).get_area() + / subcell_rect_area + ) > 0.5: + if cell_rect is None: + cell_rect = Rect(list(subcell['bbox'])) + else: + cell_rect.include_rect(Rect(list(subcell['bbox']))) + cell_rows = cell_rows.union(set(subcell['row_nums'])) + cell_columns = cell_columns.union(set(subcell['column_nums'])) + # By convention here, all subcells must be classified + # as header cells for a spanning cell to be classified as a header cell; + # otherwise, this could lead to a non-rectangular header region + header = ( + header and 'column header' in subcell and subcell['column header'] + ) + if len(cell_rows) > 0 and len(cell_columns) > 0: + cell = { + 'bbox': list(cell_rect), + 'column_nums': list(cell_columns), + 'row_nums': list(cell_rows), + 'column header': header, + 'projected row header': spanning_cell['projected row header'], + } + cells.append(cell) + + # Compute a confidence score based on how well the page tokens + # slot into the cells reported by the model + _, _, cell_match_scores = postprocess.slot_into_containers(cells, tokens) + try: + mean_match_score = sum(cell_match_scores) / len(cell_match_scores) + min_match_score = min(cell_match_scores) + confidence_score = (mean_match_score + min_match_score) / 2 + except: + confidence_score = 0 + + # Dilate rows and columns before final extraction + # dilated_columns = fill_column_gaps(columns, table_bbox) + dilated_columns = columns + # dilated_rows = fill_row_gaps(rows, table_bbox) + dilated_rows = rows + for 
cell in cells: + column_rect = Rect() + for column_num in cell['column_nums']: + column_rect.include_rect(list(dilated_columns[column_num]['bbox'])) + row_rect = Rect() + for row_num in cell['row_nums']: + row_rect.include_rect(list(dilated_rows[row_num]['bbox'])) + cell_rect = column_rect.intersect(row_rect) + cell['bbox'] = list(cell_rect) + + span_nums_by_cell, _, _ = postprocess.slot_into_containers( + cells, + tokens, + overlap_threshold=0.001, + unique_assignment=True, + forced_assignment=False, + ) + + for cell, cell_span_nums in zip(cells, span_nums_by_cell): + cell_spans = [tokens[num] for num in cell_span_nums] + # TODO: Refine how text is extracted; should be character-based, not span-based; + # but need to associate + cell['cell text'] = postprocess.extract_text_from_spans( + cell_spans, remove_integer_superscripts=False + ) + cell['spans'] = cell_spans + + # Adjust the row, column, and cell bounding boxes to reflect the extracted text + num_rows = len(rows) + rows = postprocess.sort_objects_top_to_bottom(rows) + num_columns = len(columns) + columns = postprocess.sort_objects_left_to_right(columns) + min_y_values_by_row = defaultdict(list) + max_y_values_by_row = defaultdict(list) + min_x_values_by_column = defaultdict(list) + max_x_values_by_column = defaultdict(list) + for cell in cells: + min_row = min(cell["row_nums"]) + max_row = max(cell["row_nums"]) + min_column = min(cell["column_nums"]) + max_column = max(cell["column_nums"]) + for span in cell['spans']: + min_x_values_by_column[min_column].append(span['bbox'][0]) + min_y_values_by_row[min_row].append(span['bbox'][1]) + max_x_values_by_column[max_column].append(span['bbox'][2]) + max_y_values_by_row[max_row].append(span['bbox'][3]) + for row_num, row in enumerate(rows): + if len(min_x_values_by_column[0]) > 0: + row['bbox'][0] = min(min_x_values_by_column[0]) + if len(min_y_values_by_row[row_num]) > 0: + row['bbox'][1] = min(min_y_values_by_row[row_num]) + if len(max_x_values_by_column[num_columns - 1]) > 0: + row['bbox'][2] = max(max_x_values_by_column[num_columns - 1]) + if len(max_y_values_by_row[row_num]) > 0: + row['bbox'][3] = max(max_y_values_by_row[row_num]) + for column_num, column in enumerate(columns): + if len(min_x_values_by_column[column_num]) > 0: + column['bbox'][0] = min(min_x_values_by_column[column_num]) + if len(min_y_values_by_row[0]) > 0: + column['bbox'][1] = min(min_y_values_by_row[0]) + if len(max_x_values_by_column[column_num]) > 0: + column['bbox'][2] = max(max_x_values_by_column[column_num]) + if len(max_y_values_by_row[num_rows - 1]) > 0: + column['bbox'][3] = max(max_y_values_by_row[num_rows - 1]) + for cell in cells: + row_rect = Rect() + column_rect = Rect() + for row_num in cell['row_nums']: + row_rect.include_rect(list(rows[row_num]['bbox'])) + for column_num in cell['column_nums']: + column_rect.include_rect(list(columns[column_num]['bbox'])) + cell_rect = row_rect.intersect(column_rect) + if cell_rect.get_area() > 0: + cell['bbox'] = list(cell_rect) + pass + + return cells, confidence_score + + +def cells_to_csv(cells): + if len(cells) > 0: + num_columns = max([max(cell['column_nums']) for cell in cells]) + 1 + num_rows = max([max(cell['row_nums']) for cell in cells]) + 1 + else: + return + + header_cells = [cell for cell in cells if cell['column header']] + if len(header_cells) > 0: + max_header_row = max([max(cell['row_nums']) for cell in header_cells]) + else: + max_header_row = -1 + + table_array = np.empty([num_rows, num_columns], dtype="object") + if len(cells) > 0: + for cell 
in cells:
+            for row_num in cell['row_nums']:
+                for column_num in cell['column_nums']:
+                    table_array[row_num, column_num] = cell["cell text"]
+
+    header = table_array[: max_header_row + 1, :]
+    flattened_header = []
+    for col in header.transpose():
+        flattened_header.append(' | '.join(OrderedDict.fromkeys(col)))
+    df = pd.DataFrame(
+        table_array[max_header_row + 1 :, :], index=None, columns=flattened_header
+    )
+
+    return df.to_csv(index=None)
+
+
+def cells_to_ET(cells):
+    cells = sorted(cells, key=lambda k: min(k['column_nums']))
+    cells = sorted(cells, key=lambda k: min(k['row_nums']))
+
+    table = ET.Element("table")
+    current_row = -1
+
+    for cell in cells:
+        this_row = min(cell['row_nums'])
+
+        attrib = {}
+        colspan = len(cell['column_nums'])
+        if colspan > 1:
+            attrib['colspan'] = str(colspan)
+        rowspan = len(cell['row_nums'])
+        if rowspan > 1:
+            attrib['rowspan'] = str(rowspan)
+        if this_row > current_row:
+            current_row = this_row
+            if cell['column header']:
+                cell_tag = "th"
+                row = ET.SubElement(table, "thead")
+            else:
+                cell_tag = "td"
+                row = ET.SubElement(table, "tr")
+        tcell = ET.SubElement(row, cell_tag, attrib=attrib)
+        tcell.text = cell['cell text']
+    return table
+
+
+def cells_to_html(cells):
+    table = cells_to_ET(cells)
+    return str(ET.tostring(table, encoding="unicode", short_empty_elements=False))
+
+
+def cells_to_markdown(cells):
+    table = cells_to_ET(cells)
+    return etree_to_markdown_table(table)
+
+
+def etree_to_markdown_table(etree):
+    """
+    Convert an XML ElementTree object into a Markdown-formatted table.
+
+    Args:
+        etree (xml.etree.ElementTree.Element): the root element of the XML table.
+
+    Returns:
+        str: the table as a Markdown-formatted string.
+    """
+    if etree.tag != 'table':
+        return "Invalid XML input: root element is not a table."
+
+    markdown_table = []
+    headers = [th.text for th in etree.findall('.//th')]
+
+    if headers:
+        markdown_table.append("| " + " | ".join(headers) + " |")
+        markdown_table.append("| " + " | ".join(["---"] * len(headers)) + " |")
+
+    rows = etree.findall('.//tr')
+    if rows:
+        for row in rows:
+            cells = [td.text.replace('\n', ' ') for td in row.findall('td')]
+            if not cells:
+                continue
+            markdown_table.append("| " + " | ".join(cells) + " |")
+    else:
+        return "Invalid XML input: no rows found."
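+    # Example output sketch (hypothetical 2x2 table with one header row):
+    #   | A | B |
+    #   | --- | --- |
+    #   | 1 | 2 |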
+ + return "\n".join(markdown_table) + + +def visualize_detected_tables(img, det_tables, out_path): + import matplotlib.pyplot as plt + import matplotlib.patches as patches + from matplotlib.patches import Patch + + plt.imshow(img, interpolation="lanczos") + plt.gcf().set_size_inches(20, 20) + ax = plt.gca() + + for det_table in det_tables: + bbox = det_table['bbox'] + + if det_table['label'] == 'table': + facecolor = (1, 0, 0.45) + edgecolor = (1, 0, 0.45) + alpha = 0.3 + linewidth = 2 + hatch = '//////' + elif det_table['label'] == 'table rotated': + facecolor = (0.95, 0.6, 0.1) + edgecolor = (0.95, 0.6, 0.1) + alpha = 0.3 + linewidth = 2 + hatch = '//////' + else: + continue + + rect = patches.Rectangle( + bbox[:2], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + linewidth=linewidth, + edgecolor='none', + facecolor=facecolor, + alpha=0.1, + ) + ax.add_patch(rect) + rect = patches.Rectangle( + bbox[:2], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + linewidth=linewidth, + edgecolor=edgecolor, + facecolor='none', + linestyle='-', + alpha=alpha, + ) + ax.add_patch(rect) + rect = patches.Rectangle( + bbox[:2], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + linewidth=0, + edgecolor=edgecolor, + facecolor='none', + linestyle='-', + hatch=hatch, + alpha=0.2, + ) + ax.add_patch(rect) + + plt.xticks([], []) + plt.yticks([], []) + + legend_elements = [ + Patch( + facecolor=(1, 0, 0.45), + edgecolor=(1, 0, 0.45), + label='Table', + hatch='//////', + alpha=0.3, + ), + Patch( + facecolor=(0.95, 0.6, 0.1), + edgecolor=(0.95, 0.6, 0.1), + label='Table (rotated)', + hatch='//////', + alpha=0.3, + ), + ] + plt.legend( + handles=legend_elements, + bbox_to_anchor=(0.5, -0.02), + loc='upper center', + borderaxespad=0, + fontsize=10, + ncol=2, + ) + plt.gcf().set_size_inches(10, 10) + plt.axis('off') + plt.savefig(out_path, bbox_inches='tight', dpi=150) + plt.close() + + return + + +def visualize_cells(img, cells, out_path): + import matplotlib.pyplot as plt + import matplotlib.patches as patches + from matplotlib.patches import Patch + + plt.imshow(img, interpolation="lanczos") + plt.gcf().set_size_inches(20, 20) + ax = plt.gca() + + for cell in cells: + bbox = cell['bbox'] + + if cell['column header']: + facecolor = (1, 0, 0.45) + edgecolor = (1, 0, 0.45) + alpha = 0.3 + linewidth = 2 + hatch = '//////' + elif cell['projected row header']: + facecolor = (0.95, 0.6, 0.1) + edgecolor = (0.95, 0.6, 0.1) + alpha = 0.3 + linewidth = 2 + hatch = '//////' + else: + facecolor = (0.3, 0.74, 0.8) + edgecolor = (0.3, 0.7, 0.6) + alpha = 0.3 + linewidth = 2 + hatch = '\\\\\\\\\\\\' + + rect = patches.Rectangle( + bbox[:2], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + linewidth=linewidth, + edgecolor='none', + facecolor=facecolor, + alpha=0.1, + ) + ax.add_patch(rect) + rect = patches.Rectangle( + bbox[:2], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + linewidth=linewidth, + edgecolor=edgecolor, + facecolor='none', + linestyle='-', + alpha=alpha, + ) + ax.add_patch(rect) + rect = patches.Rectangle( + bbox[:2], + bbox[2] - bbox[0], + bbox[3] - bbox[1], + linewidth=0, + edgecolor=edgecolor, + facecolor='none', + linestyle='-', + hatch=hatch, + alpha=0.2, + ) + ax.add_patch(rect) + + plt.xticks([], []) + plt.yticks([], []) + + legend_elements = [ + Patch( + facecolor=(0.3, 0.74, 0.8), + edgecolor=(0.3, 0.7, 0.6), + label='Data cell', + hatch='\\\\\\\\\\\\', + alpha=0.3, + ), + Patch( + facecolor=(1, 0, 0.45), + edgecolor=(1, 0, 0.45), + label='Column header cell', + hatch='//////', + alpha=0.3, + ), + Patch( + facecolor=(0.95, 0.6, 
0.1), + edgecolor=(0.95, 0.6, 0.1), + label='Projected row header cell', + hatch='//////', + alpha=0.3, + ), + ] + plt.legend( + handles=legend_elements, + bbox_to_anchor=(0.5, -0.02), + loc='upper center', + borderaxespad=0, + fontsize=10, + ncol=3, + ) + plt.gcf().set_size_inches(10, 10) + plt.axis('off') + plt.savefig(out_path, bbox_inches='tight', dpi=150) + plt.close() + + return + + +def output_result(key, val, args, img, img_file): + import json + + if key == 'objects': + # if args.verbose: + # print(val) + out_file = img_file.replace(".jpg", "_objects.json") + with open(os.path.join(args.out_dir, out_file), 'w', encoding='utf-8') as f: + json.dump(val, f) + # if args.visualize: + # out_file = img_file.replace(".jpg", "_fig_tables.jpg") + # out_path = os.path.join(args.out_dir, out_file) + # visualize_detected_tables(img, val, out_path) + elif not key == 'image' and not key == 'tokens': + for idx, elem in enumerate(val): + if key == 'crops': + for idx, cropped_table in enumerate(val): + out_img_file = img_file.replace(".jpg", "_table_{}.jpg".format(idx)) + cropped_table['image'].save( + os.path.join(args.out_dir, out_img_file) + ) + out_words_file = out_img_file.replace(".jpg", "_words.json") + with open(os.path.join(args.out_dir, out_words_file), 'w', encoding='utf-8') as f: + json.dump(cropped_table['tokens'], f) + elif key == 'cells': + out_file = img_file.replace(".jpg", "_{}_objects.json".format(idx)) + with open(os.path.join(args.out_dir, out_file), 'w', encoding='utf-8') as f: + json.dump(elem, f) + # if args.verbose: + # print(elem) + if True: + out_file = img_file.replace(".jpg", "_fig_cells.jpg") + out_path = os.path.join(args.out_dir, out_file) + visualize_cells(img, elem, out_path) + else: + out_file = img_file.replace(".jpg", "_{}.{}".format(idx, key)) + with open(os.path.join(args.out_dir, out_file), 'w', encoding='utf-8') as f: + f.write(elem) + if args.verbose: + print(elem) diff --git a/pix2text/table_postprocess.py b/pix2text/table_postprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..435a744ace713b9b169e7944496e63c0748428b3 --- /dev/null +++ b/pix2text/table_postprocess.py @@ -0,0 +1,906 @@ +""" +Copyright (C) 2021 Microsoft Corporation +""" +# coding: utf-8 +# Adapted from https://github.com/microsoft/table-transformer/blob/main/src/postprocess.py +from collections import defaultdict + +from fitz import Rect + + +def apply_threshold(objects, threshold): + """ + Filter out objects below a certain score. + """ + return [obj for obj in objects if obj['score'] >= threshold] + + +def apply_class_thresholds(bboxes, labels, scores, class_names, class_thresholds): + """ + Filter out bounding boxes whose confidence is below the confidence threshold for + its associated class label. + """ + # Apply class-specific thresholds + indices_above_threshold = [idx for idx, (score, label) in enumerate(zip(scores, labels)) + if score >= class_thresholds[ + class_names[label] + ] + ] + bboxes = [bboxes[idx] for idx in indices_above_threshold] + scores = [scores[idx] for idx in indices_above_threshold] + labels = [labels[idx] for idx in indices_above_threshold] + + return bboxes, scores, labels + + +def iou(bbox1, bbox2): + """ + Compute the intersection-over-union of two bounding boxes. 
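+
+    Toy example (hypothetical boxes: intersection area 50, union area 150)::
+
+        >>> iou([0, 0, 10, 10], [5, 0, 15, 10])
+        0.3333333333333333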
+ """ + intersection = Rect(bbox1).intersect(bbox2) + union = Rect(bbox1).include_rect(bbox2) + + union_area = union.get_area() + if union_area > 0: + return intersection.get_area() / union.get_area() + + return 0 + + +def iob(bbox1, bbox2): + """ + Compute the intersection area over box area, for bbox1. + """ + intersection = Rect(bbox1).intersect(bbox2) + + bbox1_area = Rect(bbox1).get_area() + if bbox1_area > 0: + return intersection.get_area() / bbox1_area + + return 0 + + +def objects_to_cells(table, objects_in_table, tokens_in_table, class_map, class_thresholds): + """ + Process the bounding boxes produced by the table structure recognition model + and the token/word/span bounding boxes into table cells. + + Also return a confidence score based on how well the text was able to be + uniquely slotted into the cells detected by the table model. + """ + + table_structures = objects_to_table_structures(table, objects_in_table, tokens_in_table, class_map, + class_thresholds) + + # Check for a valid table + if len(table_structures['columns']) < 1 or len(table_structures['rows']) < 1: + cells = []#None + confidence_score = 0 + else: + cells, confidence_score = table_structure_to_cells(table_structures, tokens_in_table, table['bbox']) + + return table_structures, cells, confidence_score + + +def objects_to_table_structures(table_object, objects_in_table, tokens_in_table, class_names, class_thresholds): + """ + Process the bounding boxes produced by the table structure recognition model into + a *consistent* set of table structures (rows, columns, supercells, headers). + This entails resolving conflicts/overlaps, and ensuring the boxes meet certain alignment + conditions (for example: rows should all have the same width, etc.). + """ + + page_num = table_object['page_num'] + + table_structures = {} + + columns = [obj for obj in objects_in_table if class_names[obj['label']] == 'table column'] + rows = [obj for obj in objects_in_table if class_names[obj['label']] == 'table row'] + headers = [obj for obj in objects_in_table if class_names[obj['label']] == 'table column header'] + supercells = [obj for obj in objects_in_table if class_names[obj['label']] == 'table spanning cell'] + for obj in supercells: + obj['subheader'] = False + subheaders = [obj for obj in objects_in_table if class_names[obj['label']] == 'table projected row header'] + for obj in subheaders: + obj['subheader'] = True + supercells += subheaders + for obj in rows: + obj['header'] = False + for header_obj in headers: + if iob(obj['bbox'], header_obj['bbox']) >= 0.5: + obj['header'] = True + + for row in rows: + row['page'] = page_num + + for column in columns: + column['page'] = page_num + + #Refine table structures + rows = refine_rows(rows, tokens_in_table, class_thresholds['table row']) + columns = refine_columns(columns, tokens_in_table, class_thresholds['table column']) + + # Shrink table bbox to just the total height of the rows + # and the total width of the columns + row_rect = Rect() + for obj in rows: + row_rect.include_rect(obj['bbox']) + column_rect = Rect() + for obj in columns: + column_rect.include_rect(obj['bbox']) + table_object['row_column_bbox'] = [column_rect[0], row_rect[1], column_rect[2], row_rect[3]] + table_object['bbox'] = table_object['row_column_bbox'] + + # Process the rows and columns into a complete segmented table + columns = align_columns(columns, table_object['row_column_bbox']) + rows = align_rows(rows, table_object['row_column_bbox']) + + table_structures['rows'] = rows + 
table_structures['columns'] = columns + table_structures['headers'] = headers + table_structures['supercells'] = supercells + + if len(rows) > 0 and len(columns) > 1: + table_structures = refine_table_structures(table_object['bbox'], table_structures, tokens_in_table, class_thresholds) + + return table_structures + + +def refine_rows(rows, tokens, score_threshold): + """ + Apply operations to the detected rows, such as + thresholding, NMS, and alignment. + """ + + if len(tokens) > 0: + rows = nms_by_containment(rows, tokens, overlap_threshold=0.5) + remove_objects_without_content(tokens, rows) + else: + rows = nms(rows, match_criteria="object2_overlap", + match_threshold=0.5, keep_higher=True) + if len(rows) > 1: + rows = sort_objects_top_to_bottom(rows) + + return rows + + +def refine_columns(columns, tokens, score_threshold): + """ + Apply operations to the detected columns, such as + thresholding, NMS, and alignment. + """ + + if len(tokens) > 0: + columns = nms_by_containment(columns, tokens, overlap_threshold=0.5) + remove_objects_without_content(tokens, columns) + else: + columns = nms(columns, match_criteria="object2_overlap", + match_threshold=0.25, keep_higher=True) + if len(columns) > 1: + columns = sort_objects_left_to_right(columns) + + return columns + + +def nms_by_containment(container_objects, package_objects, overlap_threshold=0.5): + """ + Non-maxima suppression (NMS) of objects based on shared containment of other objects. + """ + container_objects = sort_objects_by_score(container_objects) + num_objects = len(container_objects) + suppression = [False for obj in container_objects] + + packages_by_container, _, _ = slot_into_containers(container_objects, package_objects, overlap_threshold=overlap_threshold, + unique_assignment=True, forced_assignment=False) + + for object2_num in range(1, num_objects): + object2_packages = set(packages_by_container[object2_num]) + if len(object2_packages) == 0: + suppression[object2_num] = True + for object1_num in range(object2_num): + if not suppression[object1_num]: + object1_packages = set(packages_by_container[object1_num]) + if len(object2_packages.intersection(object1_packages)) > 0: + suppression[object2_num] = True + + final_objects = [obj for idx, obj in enumerate(container_objects) if not suppression[idx]] + return final_objects + + +def slot_into_containers(container_objects, package_objects, overlap_threshold=0.5, + unique_assignment=True, forced_assignment=False): + """ + Slot a collection of objects into the container they occupy most (the container which holds the largest fraction of the object). 
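+
+    Toy example (hypothetical boxes; the single token lies entirely inside
+    the first container)::
+
+        containers = [{'bbox': [0, 0, 50, 20]}, {'bbox': [50, 0, 100, 20]}]
+        tokens = [{'bbox': [10, 5, 30, 15]}]
+        slot_into_containers(containers, tokens)
+        # -> ([[0], []], [[0]], [1.0])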
+ """ + best_match_scores = [] + + container_assignments = [[] for container in container_objects] + package_assignments = [[] for package in package_objects] + + if len(container_objects) == 0 or len(package_objects) == 0: + return container_assignments, package_assignments, best_match_scores + + match_scores = defaultdict(dict) + for package_num, package in enumerate(package_objects): + match_scores = [] + package_rect = Rect(package['bbox']) + package_area = package_rect.get_area() + for container_num, container in enumerate(container_objects): + container_rect = Rect(container['bbox']) + intersect_area = container_rect.intersect(package['bbox']).get_area() + overlap_fraction = intersect_area / package_area + match_scores.append({'container': container, 'container_num': container_num, 'score': overlap_fraction}) + + sorted_match_scores = sort_objects_by_score(match_scores) + + best_match_score = sorted_match_scores[0] + best_match_scores.append(best_match_score['score']) + if forced_assignment or best_match_score['score'] >= overlap_threshold: + container_assignments[best_match_score['container_num']].append(package_num) + package_assignments[package_num].append(best_match_score['container_num']) + + if not unique_assignment: # slot package into all eligible slots + for match_score in sorted_match_scores[1:]: + if match_score['score'] >= overlap_threshold: + container_assignments[match_score['container_num']].append(package_num) + package_assignments[package_num].append(match_score['container_num']) + else: + break + + return container_assignments, package_assignments, best_match_scores + + +def sort_objects_by_score(objects, reverse=True): + """ + Put any set of objects in order from high score to low score. + """ + if reverse: + sign = -1 + else: + sign = 1 + return sorted(objects, key=lambda k: sign*k['score']) + + +def remove_objects_without_content(page_spans, objects): + """ + Remove any objects (these can be rows, columns, supercells, etc.) that don't + have any text associated with them. + """ + for obj in objects[:]: + object_text, _ = extract_text_inside_bbox(page_spans, obj['bbox']) + if len(object_text.strip()) == 0: + objects.remove(obj) + + +def extract_text_inside_bbox(spans, bbox): + """ + Extract the text inside a bounding box. + """ + bbox_spans = get_bbox_span_subset(spans, bbox) + bbox_text = extract_text_from_spans(bbox_spans, remove_integer_superscripts=True) + + return bbox_text, bbox_spans + + +def get_bbox_span_subset(spans, bbox, threshold=0.5): + """ + Reduce the set of spans to those that fall within a bounding box. + + threshold: the fraction of the span that must overlap with the bbox. + """ + span_subset = [] + for span in spans: + if overlaps(span['bbox'], bbox, threshold): + span_subset.append(span) + return span_subset + + +def overlaps(bbox1, bbox2, threshold=0.5): + """ + Test if more than "threshold" fraction of bbox1 overlaps with bbox2. + """ + rect1 = Rect(list(bbox1)) + area1 = rect1.get_area() + if area1 == 0: + return False + return rect1.intersect(list(bbox2)).get_area()/area1 >= threshold + + +def is_int(string): + """ + Test if a string is an integer. + """ + try: + int(string) + return True + except ValueError: + return False + + +def extract_text_from_spans(spans, join_with_space=True, remove_integer_superscripts=True): + """ + Convert a collection of page tokens/words/spans into a single text string. 
+ """ + + if join_with_space: + join_char = " " + else: + join_char = "" + spans_copy = spans[:] + + if remove_integer_superscripts: + for span in spans: + if not 'flags' in span: + continue + flags = span['flags'] + if flags & 2**0: # superscript flag + if is_int(span['text']): + spans_copy.remove(span) + else: + span['superscript'] = True + + if len(spans_copy) == 0: + return "" + + spans_copy.sort(key=lambda span: span['span_num']) + spans_copy.sort(key=lambda span: span['line_num']) + spans_copy.sort(key=lambda span: span['block_num']) + + # Force the span at the end of every line within a block to have exactly one space + # unless the line ends with a space or ends with a non-space followed by a hyphen + line_texts = [] + line_span_texts = [spans_copy[0]['text']] + for span1, span2 in zip(spans_copy[:-1], spans_copy[1:]): + if not span1['block_num'] == span2['block_num'] or not span1['line_num'] == span2['line_num']: + line_text = join_char.join(line_span_texts).strip() + if (len(line_text) > 0 + and not line_text[-1] == ' ' + and not (len(line_text) > 1 and line_text[-1] == "-" and not line_text[-2] == ' ')): + if not join_with_space: + line_text += ' ' + line_texts.append(line_text) + line_span_texts = [span2['text']] + else: + line_span_texts.append(span2['text']) + line_text = join_char.join(line_span_texts) + line_texts.append(line_text) + + return join_char.join(line_texts).strip() + + +def sort_objects_left_to_right(objs): + """ + Put the objects in order from left to right. + """ + return sorted(objs, key=lambda k: k['bbox'][0] + k['bbox'][2]) + + +def sort_objects_top_to_bottom(objs): + """ + Put the objects in order from top to bottom. + """ + return sorted(objs, key=lambda k: k['bbox'][1] + k['bbox'][3]) + + +def align_columns(columns, bbox): + """ + For every column, align the top and bottom boundaries to the final + table bounding box. + """ + try: + for column in columns: + column['bbox'][1] = bbox[1] + column['bbox'][3] = bbox[3] + except Exception as err: + print("Could not align columns: {}".format(err)) + pass + + return columns + + +def align_rows(rows, bbox): + """ + For every row, align the left and right boundaries to the final + table bounding box. + """ + try: + for row in rows: + row['bbox'][0] = bbox[0] + row['bbox'][2] = bbox[2] + except Exception as err: + print("Could not align rows: {}".format(err)) + pass + + return rows + + +def refine_table_structures(table_bbox, table_structures, page_spans, class_thresholds): + """ + Apply operations to the detected table structure objects such as + thresholding, NMS, and alignment. 
+ """ + rows = table_structures["rows"] + columns = table_structures['columns'] + + #columns = fill_column_gaps(columns, table_bbox) + #rows = fill_row_gaps(rows, table_bbox) + + # Process the headers + headers = table_structures['headers'] + headers = apply_threshold(headers, class_thresholds["table column header"]) + headers = nms(headers) + headers = align_headers(headers, rows) + + # Process supercells + supercells = [elem for elem in table_structures['supercells'] if not elem['subheader']] + subheaders = [elem for elem in table_structures['supercells'] if elem['subheader']] + supercells = apply_threshold(supercells, class_thresholds["table spanning cell"]) + subheaders = apply_threshold(subheaders, class_thresholds["table projected row header"]) + supercells += subheaders + # Align before NMS for supercells because alignment brings them into agreement + # with rows and columns first; if supercells still overlap after this operation, + # the threshold for NMS can basically be lowered to just above 0 + supercells = align_supercells(supercells, rows, columns) + supercells = nms_supercells(supercells) + + header_supercell_tree(supercells) + + table_structures['columns'] = columns + table_structures['rows'] = rows + table_structures['supercells'] = supercells + table_structures['headers'] = headers + + return table_structures + + +def nms(objects, match_criteria="object2_overlap", match_threshold=0.05, keep_higher=True): + """ + A customizable version of non-maxima suppression (NMS). + + Default behavior: If a lower-confidence object overlaps more than 5% of its area + with a higher-confidence object, remove the lower-confidence object. + + objects: set of dicts; each object dict must have a 'bbox' and a 'score' field + match_criteria: how to measure how much two objects "overlap" + match_threshold: the cutoff for determining that overlap requires suppression of one object + keep_higher: if True, keep the object with the higher metric; otherwise, keep the lower + """ + if len(objects) == 0: + return [] + + objects = sort_objects_by_score(objects, reverse=keep_higher) + + num_objects = len(objects) + suppression = [False for obj in objects] + + for object2_num in range(1, num_objects): + object2_rect = Rect(objects[object2_num]['bbox']) + object2_area = object2_rect.get_area() + for object1_num in range(object2_num): + if not suppression[object1_num]: + object1_rect = Rect(objects[object1_num]['bbox']) + object1_area = object1_rect.get_area() + intersect_area = object1_rect.intersect(object2_rect).get_area() + try: + if match_criteria=="object1_overlap": + metric = intersect_area / object1_area + elif match_criteria=="object2_overlap": + metric = intersect_area / object2_area + elif match_criteria=="iou": + metric = intersect_area / (object1_area + object2_area - intersect_area) + if metric >= match_threshold: + suppression[object2_num] = True + break + except Exception: + # Intended to recover from divide-by-zero + pass + + return [obj for idx, obj in enumerate(objects) if not suppression[idx]] + + +def align_headers(headers, rows): + """ + Adjust the header boundary to be the convex hull of the rows it intersects + at least 50% of the height of. + + For now, we are not supporting tables with multiple headers, so we need to + eliminate anything besides the top-most header. 
+ """ + + aligned_headers = [] + + for row in rows: + row['header'] = False + + header_row_nums = [] + for header in headers: + for row_num, row in enumerate(rows): + row_height = row['bbox'][3] - row['bbox'][1] + min_row_overlap = max(row['bbox'][1], header['bbox'][1]) + max_row_overlap = min(row['bbox'][3], header['bbox'][3]) + overlap_height = max_row_overlap - min_row_overlap + if overlap_height / row_height >= 0.5: + header_row_nums.append(row_num) + + if len(header_row_nums) == 0: + return aligned_headers + + header_rect = Rect() + if header_row_nums[0] > 0: + header_row_nums = list(range(header_row_nums[0]+1)) + header_row_nums + + last_row_num = -1 + for row_num in header_row_nums: + if row_num == last_row_num + 1: + row = rows[row_num] + row['header'] = True + header_rect = header_rect.include_rect(row['bbox']) + last_row_num = row_num + else: + # Break as soon as a non-header row is encountered. + # This ignores any subsequent rows in the table labeled as a header. + # Having more than 1 header is not supported currently. + break + + header = {'bbox': list(header_rect)} + aligned_headers.append(header) + + return aligned_headers + + +def align_supercells(supercells, rows, columns): + """ + For each supercell, align it to the rows it intersects 50% of the height of, + and the columns it intersects 50% of the width of. + Eliminate supercells for which there are no rows and columns it intersects 50% with. + """ + aligned_supercells = [] + + for supercell in supercells: + supercell['header'] = False + row_bbox_rect = None + col_bbox_rect = None + intersecting_header_rows = set() + intersecting_data_rows = set() + for row_num, row in enumerate(rows): + row_height = row['bbox'][3] - row['bbox'][1] + supercell_height = supercell['bbox'][3] - supercell['bbox'][1] + min_row_overlap = max(row['bbox'][1], supercell['bbox'][1]) + max_row_overlap = min(row['bbox'][3], supercell['bbox'][3]) + overlap_height = max_row_overlap - min_row_overlap + if 'span' in supercell: + overlap_fraction = max(overlap_height/row_height, + overlap_height/supercell_height) + else: + overlap_fraction = overlap_height / row_height + if overlap_fraction >= 0.5: + if 'header' in row and row['header']: + intersecting_header_rows.add(row_num) + else: + intersecting_data_rows.add(row_num) + + # Supercell cannot span across the header boundary; eliminate whichever + # group of rows is the smallest + supercell['header'] = False + if len(intersecting_data_rows) > 0 and len(intersecting_header_rows) > 0: + if len(intersecting_data_rows) > len(intersecting_header_rows): + intersecting_header_rows = set() + else: + intersecting_data_rows = set() + if len(intersecting_header_rows) > 0: + supercell['header'] = True + elif 'span' in supercell: + continue # Require span supercell to be in the header + intersecting_rows = intersecting_data_rows.union(intersecting_header_rows) + # Determine vertical span of aligned supercell + for row_num in intersecting_rows: + if row_bbox_rect is None: + row_bbox_rect = Rect(rows[row_num]['bbox']) + else: + row_bbox_rect = row_bbox_rect.include_rect(rows[row_num]['bbox']) + if row_bbox_rect is None: + continue + + intersecting_cols = [] + for col_num, col in enumerate(columns): + col_width = col['bbox'][2] - col['bbox'][0] + supercell_width = supercell['bbox'][2] - supercell['bbox'][0] + min_col_overlap = max(col['bbox'][0], supercell['bbox'][0]) + max_col_overlap = min(col['bbox'][2], supercell['bbox'][2]) + overlap_width = max_col_overlap - min_col_overlap + if 'span' in supercell: + 
overlap_fraction = max(overlap_width/col_width, + overlap_width/supercell_width) + # Multiply by 2 effectively lowers the threshold to 0.25 + if supercell['header']: + overlap_fraction = overlap_fraction * 2 + else: + overlap_fraction = overlap_width / col_width + if overlap_fraction >= 0.5: + intersecting_cols.append(col_num) + if col_bbox_rect is None: + col_bbox_rect = Rect(col['bbox']) + else: + col_bbox_rect = col_bbox_rect.include_rect(col['bbox']) + if col_bbox_rect is None: + continue + + supercell_bbox = list(row_bbox_rect.intersect(col_bbox_rect)) + supercell['bbox'] = supercell_bbox + + # Only a true supercell if it joins across multiple rows or columns + if (len(intersecting_rows) > 0 and len(intersecting_cols) > 0 + and (len(intersecting_rows) > 1 or len(intersecting_cols) > 1)): + supercell['row_numbers'] = list(intersecting_rows) + supercell['column_numbers'] = intersecting_cols + aligned_supercells.append(supercell) + + # A span supercell in the header means there must be supercells above it in the header + if 'span' in supercell and supercell['header'] and len(supercell['column_numbers']) > 1: + for row_num in range(0, min(supercell['row_numbers'])): + new_supercell = {'row_numbers': [row_num], 'column_numbers': supercell['column_numbers'], + 'score': supercell['score'], 'propagated': True} + new_supercell_columns = [columns[idx] for idx in supercell['column_numbers']] + new_supercell_rows = [rows[idx] for idx in supercell['row_numbers']] + bbox = [min([column['bbox'][0] for column in new_supercell_columns]), + min([row['bbox'][1] for row in new_supercell_rows]), + max([column['bbox'][2] for column in new_supercell_columns]), + max([row['bbox'][3] for row in new_supercell_rows])] + new_supercell['bbox'] = bbox + aligned_supercells.append(new_supercell) + + return aligned_supercells + + +def nms_supercells(supercells): + """ + A NMS scheme for supercells that first attempts to shrink supercells to + resolve overlap. + If two supercells overlap the same (sub)cell, shrink the lower confidence + supercell to resolve the overlap. If shrunk supercell is empty, remove it. + """ + + supercells = sort_objects_by_score(supercells) + num_supercells = len(supercells) + suppression = [False for supercell in supercells] + + for supercell2_num in range(1, num_supercells): + supercell2 = supercells[supercell2_num] + for supercell1_num in range(supercell2_num): + supercell1 = supercells[supercell1_num] + remove_supercell_overlap(supercell1, supercell2) + if ((len(supercell2['row_numbers']) < 2 and len(supercell2['column_numbers']) < 2) + or len(supercell2['row_numbers']) == 0 or len(supercell2['column_numbers']) == 0): + suppression[supercell2_num] = True + + return [obj for idx, obj in enumerate(supercells) if not suppression[idx]] + + +def header_supercell_tree(supercells): + """ + Make sure no supercell in the header is below more than one supercell in any row above it. + The cells in the header form a tree, but a supercell with more than one supercell in a row + above it means that some cell has more than one parent, which is not allowed. Eliminate + any supercell that would cause this to be violated. 
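+
+    Operates in place: offending supercells are removed from the input list.
+    For example (hypothetical), a header supercell spanning columns 0-1 in
+    row 1 survives only if exactly one supercell in row 0 covers a superset
+    of columns {0, 1}.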
+ """ + header_supercells = [supercell for supercell in supercells if 'header' in supercell and supercell['header']] + header_supercells = sort_objects_by_score(header_supercells) + + for header_supercell in header_supercells[:]: + ancestors_by_row = defaultdict(int) + min_row = min(header_supercell['row_numbers']) + for header_supercell2 in header_supercells: + max_row2 = max(header_supercell2['row_numbers']) + if max_row2 < min_row: + if (set(header_supercell['column_numbers']).issubset( + set(header_supercell2['column_numbers']))): + for row2 in header_supercell2['row_numbers']: + ancestors_by_row[row2] += 1 + for row in range(0, min_row): + if not ancestors_by_row[row] == 1: + supercells.remove(header_supercell) + break + + +def table_structure_to_cells(table_structures, table_spans, table_bbox): + """ + Assuming the row, column, supercell, and header bounding boxes have + been refined into a set of consistent table structures, process these + table structures into table cells. This is a universal representation + format for the table, which can later be exported to Pandas or CSV formats. + Classify the cells as header/access cells or data cells + based on if they intersect with the header bounding box. + """ + columns = table_structures['columns'] + rows = table_structures['rows'] + supercells = table_structures['supercells'] + cells = [] + subcells = [] + + # Identify complete cells and subcells + for column_num, column in enumerate(columns): + for row_num, row in enumerate(rows): + column_rect = Rect(list(column['bbox'])) + row_rect = Rect(list(row['bbox'])) + cell_rect = row_rect.intersect(column_rect) + header = 'header' in row and row['header'] + cell = {'bbox': list(cell_rect), 'column_nums': [column_num], 'row_nums': [row_num], + 'header': header} + + cell['subcell'] = False + for supercell in supercells: + supercell_rect = Rect(list(supercell['bbox'])) + if (supercell_rect.intersect(cell_rect).get_area() + / cell_rect.get_area()) > 0.5: + cell['subcell'] = True + break + + if cell['subcell']: + subcells.append(cell) + else: + #cell_text = extract_text_inside_bbox(table_spans, cell['bbox']) + #cell['cell_text'] = cell_text + cell['subheader'] = False + cells.append(cell) + + for supercell in supercells: + supercell_rect = Rect(list(supercell['bbox'])) + cell_columns = set() + cell_rows = set() + cell_rect = None + header = True + for subcell in subcells: + subcell_rect = Rect(list(subcell['bbox'])) + subcell_rect_area = subcell_rect.get_area() + if (subcell_rect.intersect(supercell_rect).get_area() + / subcell_rect_area) > 0.5: + if cell_rect is None: + cell_rect = Rect(list(subcell['bbox'])) + else: + cell_rect.include_rect(Rect(list(subcell['bbox']))) + cell_rows = cell_rows.union(set(subcell['row_nums'])) + cell_columns = cell_columns.union(set(subcell['column_nums'])) + # By convention here, all subcells must be classified + # as header cells for a supercell to be classified as a header cell; + # otherwise, this could lead to a non-rectangular header region + header = header and 'header' in subcell and subcell['header'] + if len(cell_rows) > 0 and len(cell_columns) > 0: + cell = {'bbox': list(cell_rect), 'column_nums': list(cell_columns), 'row_nums': list(cell_rows), + 'header': header, 'subheader': supercell['subheader']} + cells.append(cell) + + # Compute a confidence score based on how well the page tokens + # slot into the cells reported by the model + _, _, cell_match_scores = slot_into_containers(cells, table_spans) + try: + mean_match_score = sum(cell_match_scores) / 
len(cell_match_scores)
+        min_match_score = min(cell_match_scores)
+        confidence_score = (mean_match_score + min_match_score)/2
+    except (ZeroDivisionError, ValueError):
+        # No cell/span matches at all; fall back to zero confidence
+        confidence_score = 0
+
+    # Dilate rows and columns before final extraction
+    #dilated_columns = fill_column_gaps(columns, table_bbox)
+    dilated_columns = columns
+    #dilated_rows = fill_row_gaps(rows, table_bbox)
+    dilated_rows = rows
+    for cell in cells:
+        column_rect = Rect()
+        for column_num in cell['column_nums']:
+            column_rect.include_rect(list(dilated_columns[column_num]['bbox']))
+        row_rect = Rect()
+        for row_num in cell['row_nums']:
+            row_rect.include_rect(list(dilated_rows[row_num]['bbox']))
+        cell_rect = column_rect.intersect(row_rect)
+        cell['bbox'] = list(cell_rect)
+
+    span_nums_by_cell, _, _ = slot_into_containers(cells, table_spans, overlap_threshold=0.001,
+                                                   unique_assignment=True, forced_assignment=False)
+
+    for cell, cell_span_nums in zip(cells, span_nums_by_cell):
+        cell_spans = [table_spans[num] for num in cell_span_nums]
+        # TODO: Refine how text is extracted; should be character-based, not span-based;
+        # but need to associate
+        cell['cell_text'] = extract_text_from_spans(cell_spans, remove_integer_superscripts=False)
+        cell['spans'] = cell_spans
+
+    # Adjust the row, column, and cell bounding boxes to reflect the extracted text
+    num_rows = len(rows)
+    rows = sort_objects_top_to_bottom(rows)
+    num_columns = len(columns)
+    columns = sort_objects_left_to_right(columns)
+    min_y_values_by_row = defaultdict(list)
+    max_y_values_by_row = defaultdict(list)
+    min_x_values_by_column = defaultdict(list)
+    max_x_values_by_column = defaultdict(list)
+    for cell in cells:
+        min_row = min(cell["row_nums"])
+        max_row = max(cell["row_nums"])
+        min_column = min(cell["column_nums"])
+        max_column = max(cell["column_nums"])
+        for span in cell['spans']:
+            min_x_values_by_column[min_column].append(span['bbox'][0])
+            min_y_values_by_row[min_row].append(span['bbox'][1])
+            max_x_values_by_column[max_column].append(span['bbox'][2])
+            max_y_values_by_row[max_row].append(span['bbox'][3])
+    for row_num, row in enumerate(rows):
+        if len(min_x_values_by_column[0]) > 0:
+            row['bbox'][0] = min(min_x_values_by_column[0])
+        if len(min_y_values_by_row[row_num]) > 0:
+            row['bbox'][1] = min(min_y_values_by_row[row_num])
+        if len(max_x_values_by_column[num_columns-1]) > 0:
+            row['bbox'][2] = max(max_x_values_by_column[num_columns-1])
+        if len(max_y_values_by_row[row_num]) > 0:
+            row['bbox'][3] = max(max_y_values_by_row[row_num])
+    for column_num, column in enumerate(columns):
+        if len(min_x_values_by_column[column_num]) > 0:
+            column['bbox'][0] = min(min_x_values_by_column[column_num])
+        if len(min_y_values_by_row[0]) > 0:
+            column['bbox'][1] = min(min_y_values_by_row[0])
+        if len(max_x_values_by_column[column_num]) > 0:
+            column['bbox'][2] = max(max_x_values_by_column[column_num])
+        if len(max_y_values_by_row[num_rows-1]) > 0:
+            column['bbox'][3] = max(max_y_values_by_row[num_rows-1])
+    for cell in cells:
+        row_rect = Rect()
+        column_rect = Rect()
+        for row_num in cell['row_nums']:
+            row_rect.include_rect(list(rows[row_num]['bbox']))
+        for column_num in cell['column_nums']:
+            column_rect.include_rect(list(columns[column_num]['bbox']))
+        cell_rect = row_rect.intersect(column_rect)
+        if cell_rect.get_area() > 0:
+            cell['bbox'] = list(cell_rect)
+
+    return cells, confidence_score
+
+
+def remove_supercell_overlap(supercell1, supercell2):
+    """
+    This function resolves overlap between supercells (supercells must be
+    disjoint) by iteratively shrinking supercells by
the fewest grid cells + necessary to resolve the overlap. + Example: + If two supercells overlap at grid cell (R, C), and supercell #1 is less + confident than supercell #2, we eliminate either row R from supercell #1 + or column C from supercell #1 by comparing the number of columns in row R + versus the number of rows in column C. If the number of columns in row R + is less than the number of rows in column C, we eliminate row R from + supercell #1. This resolves the overlap by removing fewer grid cells from + supercell #1 than if we eliminated column C from it. + """ + common_rows = set(supercell1['row_numbers']).intersection(set(supercell2['row_numbers'])) + common_columns = set(supercell1['column_numbers']).intersection(set(supercell2['column_numbers'])) + + # While the supercells have overlapping grid cells, continue shrinking the less-confident + # supercell one row or one column at a time + while len(common_rows) > 0 and len(common_columns) > 0: + # Try to shrink the supercell as little as possible to remove the overlap; + # if the supercell has fewer rows than columns, remove an overlapping column, + # because this removes fewer grid cells from the supercell; + # otherwise remove an overlapping row + if len(supercell2['row_numbers']) < len(supercell2['column_numbers']): + min_column = min(supercell2['column_numbers']) + max_column = max(supercell2['column_numbers']) + if max_column in common_columns: + common_columns.remove(max_column) + supercell2['column_numbers'].remove(max_column) + elif min_column in common_columns: + common_columns.remove(min_column) + supercell2['column_numbers'].remove(min_column) + else: + supercell2['column_numbers'] = [] + common_columns = set() + else: + min_row = min(supercell2['row_numbers']) + max_row = max(supercell2['row_numbers']) + if max_row in common_rows: + common_rows.remove(max_row) + supercell2['row_numbers'].remove(max_row) + elif min_row in common_rows: + common_rows.remove(min_row) + supercell2['row_numbers'].remove(min_row) + else: + supercell2['row_numbers'] = [] + common_rows = set() diff --git a/pix2text/text_formula_ocr.py b/pix2text/text_formula_ocr.py new file mode 100644 index 0000000000000000000000000000000000000000..c2ffd408ec35e9e0ba7bf67d51de108f9d3cac3b --- /dev/null +++ b/pix2text/text_formula_ocr.py @@ -0,0 +1,742 @@ +# coding: utf-8 +# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix. +# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com). 
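+#
+# A minimal usage sketch (illustrative only; the file path is a placeholder and
+# the default models must be obtainable):
+#
+#   from pix2text.text_formula_ocr import TextFormulaOCR
+#
+#   p2t = TextFormulaOCR.from_config()
+#   print(p2t.recognize('page-with-formulas.jpg', return_text=True))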
+ +import logging +import re +from itertools import chain +from pathlib import Path +from typing import Dict, Any, Optional, Union, List +from copy import copy + +from PIL import Image +import numpy as np +import torch +from cnstd.utils import box_partial_overlap +from spellchecker import SpellChecker + +from .utils import ( + custom_deepcopy, + sort_boxes, + merge_adjacent_bboxes, + adjust_line_height, + adjust_line_width, + rotated_box_to_horizontal, + is_valid_box, + list2box, + select_device, + prepare_imgs, + merge_line_texts, + remove_overlap_text_bbox, + y_overlap, +) +from .ocr_engine import prepare_ocr_engine, TextOcrEngine +from .formula_detector import MathFormulaDetector +from .latex_ocr import LatexOCR +from .utils import ( + read_img, + save_layout_img, +) + +logger = logging.getLogger(__name__) + + +DEFAULT_CONFIGS = { + 'mfd': {}, + 'text': {}, + 'formula': {}, +} +# see: https://pypi.org/project/pyspellchecker +CHECKER_SUPPORTED_LANGUAGES = { + 'en', + 'es', + 'fr', + 'pt', + 'de', + 'it', + 'ru', + 'ar', + 'eu', + 'lv', + 'nl', +} + + +class TextFormulaOCR(object): + def __init__( + self, + *, + text_ocr: Optional[TextOcrEngine] = None, + mfd: Optional[Any] = None, + latex_ocr: Optional[LatexOCR] = None, + spellchecker: Optional[SpellChecker] = None, + enable_formula: bool = True, + **kwargs, + ): + """ + Recognize text and formula from an image. + Args: + text_ocr (Optional[TextOcrEngine]): Text OCR engine; defaults to `None`. + mfd (Optional[Any]): Math Formula Detector; defaults to `None`. + latex_ocr (Optional[LatexOCR]): Latex OCR engine; defaults to `None`. + spellchecker (Optional[SpellChecker]): Spell Checker; defaults to `None`. + enable_formula (bool): Whether to enable the capability of Math Formula Detection (MFD) and Recognition (MFR); defaults to `True`. + **kwargs (): + """ + if text_ocr is None: + text_config = custom_deepcopy(DEFAULT_CONFIGS['text']) + device = select_device(device=None) + text_config['context'] = device + logger.warning( + f'text_ocr must not be None. Using default text_ocr engine instead, with config: {text_config}.' + ) + text_ocr = prepare_ocr_engine( + languages=('en', 'ch_sim'), ocr_engine_config=text_config + ) + # if mfd is None or latex_ocr is None: + # default_ocr = TextFormulaOCR.from_config() + # mfd = default_ocr.mfd if mfd is None else mfd + # text_ocr = default_ocr.text_ocr if text_ocr is None else text_ocr + # latex_ocr = default_ocr.latex_ocr if latex_ocr is None else latex_ocr + # del default_ocr + + self.text_ocr = text_ocr + self.mfd = mfd + self.latex_ocr = latex_ocr + self.spellchecker = spellchecker + self.enable_formula = enable_formula + + @classmethod + def from_config( + cls, + total_configs: Optional[dict] = None, + enable_formula: bool = True, + enable_spell_checker: bool = True, + device: str = None, + **kwargs, + ): + """ + Args: + total_configs (dict): Configuration information for Pix2Text; defaults to `None`, which means using the default configuration. Usually the following keys are used: + + * languages (str or Sequence[str]): The language code(s) of the text to be recognized; defaults to `('en', 'ch_sim')`. + * mfd (dict): Configuration information for the Analyzer model; defaults to `None`, which means using the default configuration. + * text (dict): Configuration information for the Text OCR model; defaults to `None`, which means using the default configuration. + * formula (dict): Configuration information for Math Formula OCR model; defaults to `None`, which means using the default configuration. 
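+
+                A sketch of such a config dict, with illustrative values (every key is optional):
+
+                    total_configs = {
+                        'languages': ('en', 'ch_sim'),
+                        'mfd': {},      # use the default MFD configuration
+                        'text': {},     # use the default text-OCR configuration
+                        'formula': {},  # use the default formula-OCR configuration
+                    }
+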
+ enable_formula (bool): Whether to enable the capability of Math Formula Detection (MFD) and Recognition (MFR); defaults to True. + enable_spell_checker (bool): Whether to enable the capability of Spell Checker; defaults to True. + device (str, optional): What device to use for computation, supports `['cpu', 'cuda', 'gpu', 'mps']`; defaults to None, which selects the device automatically. + **kwargs (): Reserved for other parameters; not currently used. + """ + total_configs = total_configs or DEFAULT_CONFIGS + languages = total_configs.get('languages', ('en', 'ch_sim')) + text_config = total_configs.get('text', dict()) + mfd_config = total_configs.get('mfd', dict()) + formula_config = total_configs.get('formula', dict()) + + device = select_device(device) + mfd_config, text_config, formula_config = cls.prepare_configs( + mfd_config, text_config, formula_config, device, + ) + + text_ocr = prepare_ocr_engine(languages, text_config) + + if enable_formula: + mfd = MathFormulaDetector(**mfd_config) + latex_ocr = LatexOCR(**formula_config) + else: + mfd = None + latex_ocr = None + + spellchecker = None + if enable_spell_checker: + checker_languages = set(languages) & CHECKER_SUPPORTED_LANGUAGES + if checker_languages: + spellchecker = SpellChecker(language=checker_languages) + + return cls( + text_ocr=text_ocr, + mfd=mfd, + latex_ocr=latex_ocr, + spellchecker=spellchecker, + enable_formula=enable_formula, + **kwargs, + ) + + @classmethod + def prepare_configs( + cls, mfd_config, text_config, formula_config, device, + ): + def _to_default(_conf, _def_val): + if not _conf: + _conf = custom_deepcopy(_def_val) + return custom_deepcopy(_conf) + + mfd_config = _to_default(mfd_config, DEFAULT_CONFIGS['mfd']) + mfd_config['device'] = device + text_config = _to_default(text_config, DEFAULT_CONFIGS['text']) + text_config['context'] = device + formula_config = _to_default(formula_config, DEFAULT_CONFIGS['formula']) + formula_config['device'] = device + return ( + mfd_config, + text_config, + formula_config, + ) + + @property + def languages(self): + return self.text_ocr.languages + + def __call__( + self, img: Union[str, Path, Image.Image], **kwargs + ) -> List[Dict[str, Any]]: + return self.recognize(img, **kwargs) + + def recognize( + self, img: Union[str, Path, Image.Image], return_text: bool = True, **kwargs + ) -> Union[str, List[Dict[str, Any]]]: + """ + Perform Mathematical Formula Detection (MFD) on the image, and then recognize the information contained in each section. + + Args: + img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()` + return_text (bool): Whether to return only the recognized text; default value is `True` + kwargs (): + * contain_formula (bool): If `True`, the image will be recognized as a mixed image (text and formula). 
If `False`, it will be recognized as plain text; default value is `True`
+                * resized_shape (int): Resize the image width to this size for processing; default value is `768`
+                * save_analysis_res (str): Save the parsed result image to this file; default value is `None`, which means not to save
+                * mfr_batch_size (int): Batch size for MFR; when running on a GPU, a value greater than 1 is recommended; default value is `1`
+                * embed_sep (tuple): Prefix and suffix for embedding latex; only effective when `return_text` is `True`; default value is `(' $', '$ ')`
+                * isolated_sep (tuple): Prefix and suffix for isolated latex; only effective when `return_text` is `True`; default value is `('$$\n', '\n$$')`
+                * line_sep (str): The separator between lines of text; only effective when `return_text` is `True`; default value is a line break
+                * auto_line_break (bool): Automatically insert line breaks in the recognized text; only effective when `return_text` is `True`; default value is `True`
+                * det_text_bbox_max_width_expand_ratio (float): Expand the width of the detected text bbox. This value represents the maximum expansion ratio to the left and right, relative to the original bbox height; default value is `0.3`
+                * det_text_bbox_max_height_expand_ratio (float): Expand the height of the detected text bbox. This value represents the maximum expansion ratio above and below, relative to the original bbox height; default value is `0.2`
+                * embed_ratio_threshold (float): The overlap threshold for embedded formulas and text lines; default value is `0.6`.
+                    When the overlap between an embedded formula and a text line is greater than or equal to this threshold,
+                    the embedded formula and the text line are considered to be on the same line;
+                    otherwise, they are considered to be on different lines.
+                * formula_rec_kwargs (dict): Generation arguments passed to the formula recognizer `latex_ocr`; default value is `{}`
+
+        Returns: a str when `return_text` is `True`, or a list of ordered (top to bottom, left to right) dicts when `return_text` is `False`,
+            with each dict representing one detected box, containing keys:
+
+           * `type`: The category of the box; one of 'text', 'isolated', 'embedding'
+           * `text`: The recognized text or Latex formula
+           * `score`: The confidence score [0, 1]; the higher, the more confident
+           * `position`: Position information of the block, `np.ndarray`, with shape of [4, 2]
+           * `line_number`: The line number of the box (first line `line_number==0`), boxes with the same value indicate they are on the same line
+
+        """
+        resized_shape = kwargs.get('resized_shape', 768)
+        if isinstance(img, Image.Image):
+            img0 = img.convert('RGB')
+        else:
+            img0 = read_img(img, return_type='Image')
+        w, h = img0.size
+        ratio = resized_shape / w
+        resized_shape = (int(h * ratio), resized_shape)  # (H, W)
+        # logger.debug('MFD Result: %s', analyzer_outs)
+        analyzer_outs = []
+        crop_patches = []
+        mf_results = []
+        enable_formula = kwargs.get('contain_formula', True) and self.enable_formula
+        if enable_formula and self.mfd is not None and self.latex_ocr is not None:
+            analyzer_outs = self.mfd(img0.copy(), resized_shape=resized_shape)
+            for mf_box_info in analyzer_outs:
+                box = mf_box_info['box']
+                xmin, ymin, xmax, ymax = (
+                    int(box[0][0]),
+                    int(box[0][1]),
+                    int(box[2][0]),
+                    int(box[2][1]),
+                )
+                crop_patch = img0.crop((xmin, ymin, xmax, ymax))
+                crop_patches.append(crop_patch)
+
+            mfr_batch_size = kwargs.get('mfr_batch_size', 1)
+            formula_rec_kwargs = kwargs.get('formula_rec_kwargs', {})
+            mf_results = self.latex_ocr.recognize(
+                crop_patches, batch_size=mfr_batch_size, **formula_rec_kwargs
+            )
+
+        assert len(mf_results) == len(analyzer_outs)
+
+        mf_outs = []
+        for mf_box_info, patch_out in zip(analyzer_outs, mf_results):
+            text = patch_out['text']
+            mf_outs.append(
+                {
+                    'type': mf_box_info['type'],
+                    'text': text,
+                    'position': mf_box_info['box'],
+                    'score': patch_out['score'],
+                }
+            )
+
+        masked_img = np.array(img0.copy())
+        # Mask out the formula regions, then run text OCR on the rest of the image
+        for mf_box_info in analyzer_outs:
+            if mf_box_info['type'] in ('isolated', 'embedding'):
+                box = mf_box_info['box']
+                xmin, ymin = max(0, int(box[0][0]) - 1), max(0, int(box[0][1]) - 1)
+                xmax, ymax = (
+                    min(img0.size[0], int(box[2][0]) + 1),
+                    min(img0.size[1], int(box[2][1]) + 1),
+                )
+                masked_img[ymin:ymax, xmin:xmax, :] = 255
+        masked_img = Image.fromarray(masked_img)
+
+        text_box_infos = self.text_ocr.detect_only(
+            np.array(img0), resized_shape=resized_shape
+        )
+        box_infos = []
+        for line_box_info in text_box_infos['detected_texts']:
+            # line_box_info['position'] may be a rotated box; convert it to a horizontal one
+            _text_box = rotated_box_to_horizontal(line_box_info['position'])
+            if not is_valid_box(_text_box, min_height=8, min_width=2):
+                continue
+            box_infos.append({'position': _text_box})
+        max_width_expand_ratio = kwargs.get('det_text_bbox_max_width_expand_ratio', 0.3)
+        if self.text_ocr.name == 'cnocr':
+            box_infos: list[dict] = adjust_line_width(
+                text_box_infos=box_infos,
+                formula_box_infos=mf_outs,
+                img_width=img0.size[0],
+                max_expand_ratio=max_width_expand_ratio,
+            )
+        box_infos = remove_overlap_text_bbox(box_infos, mf_outs)
+
+        def _to_iou_box(ori):
+            return torch.tensor([ori[0][0], ori[0][1], ori[2][0], ori[2][1]]).unsqueeze(
+                0
+            )
+
+        embed_ratio_threshold = kwargs.get('embed_ratio_threshold', 0.6)
+        total_text_boxes = []
+        for line_box_info in box_infos:
+            _line_box = _to_iou_box(line_box_info['position'])
+            _embed_mfs = []
+            for mf_box_info in mf_outs:
+                if mf_box_info['type'] == 'embedding':
+                    _mf_box = _to_iou_box(mf_box_info['position'])
+                    overlap_area_ratio = float(
+                        box_partial_overlap(_line_box, _mf_box).squeeze()
+                    )
+                    if overlap_area_ratio >= embed_ratio_threshold or (
+                        overlap_area_ratio > 0
+                        and y_overlap(line_box_info, mf_box_info, key='position')
+                        > embed_ratio_threshold
+                    ):
+                        _embed_mfs.append(
+                            {
+                                'position': _mf_box[0].int().tolist(),
+                                'text': mf_box_info['text'],
+                                'type': mf_box_info['type'],
+                            }
+                        )
+
+            ocr_boxes = self._split_line_image(_line_box, _embed_mfs)
+            total_text_boxes.extend(ocr_boxes)
+
+        outs = copy(mf_outs)
+        for box in total_text_boxes:
+            box['position'] = list2box(*box['position'])
+            outs.append(box)
+        outs = sort_boxes(outs, key='position')
+        outs = [merge_adjacent_bboxes(bboxes) for bboxes in outs]
+        max_height_expand_ratio = kwargs.get(
+            'det_text_bbox_max_height_expand_ratio', 0.2
+        )
+        outs = adjust_line_height(
+            outs, img0.size[1], max_expand_ratio=max_height_expand_ratio
+        )
+
+        for line_idx, line_boxes in enumerate(outs):
+            for box in line_boxes:
+                if box['type'] != 'text':
+                    continue
+                bbox = box['position']
+                xmin, ymin, xmax, ymax = (
+                    int(bbox[0][0]),
+                    int(bbox[0][1]),
+                    int(bbox[2][0]),
+                    int(bbox[2][1]),
+                )
+                crop_patch = np.array(masked_img.crop((xmin, ymin, xmax, ymax)))
+                part_res = self.text_ocr.recognize_only(crop_patch)
+                box['text'] = part_res['text']
+                box['score'] = part_res['score']
+            outs[line_idx] = [box for box in line_boxes if box['text'].strip()]
+
+        logger.debug(outs)
+        outs = self._post_process(outs)
+
+        outs = list(chain(*outs))
+        if kwargs.get('save_analysis_res'):
+            save_layout_img(
+                img0,
+                ('text', 'isolated', 'embedding'),
+                outs,
+                kwargs.get('save_analysis_res'),
+            )
+
+        if return_text:
+            embed_sep = kwargs.get('embed_sep', (' $', '$ '))
+            isolated_sep = kwargs.get('isolated_sep', ('$$\n', '\n$$'))
+            line_sep = kwargs.get('line_sep', '\n')
+            auto_line_break = kwargs.get('auto_line_break', True)
+            outs = merge_line_texts(
+                outs,
+                auto_line_break,
+                line_sep,
+                embed_sep,
+                isolated_sep,
+                self.spellchecker,
+            )
+
+        return outs
+
+    def _post_process(self, outs):
+        match_pairs = [
+            (',', ',,'),
+            ('.', '.。'),
+            ('?', '??'),
+        ]
+        formula_tag = r'^[(\(]\d+(\.\d+)*[)\)]$'
+
+        def _match(a1, a2):
+            matched = False
+            for b1, b2 in match_pairs:
+                if a1 in b1 and a2 in b2:
+                    matched = True
+                    break
+            return matched
+
+        for idx, line_boxes in enumerate(outs):
+            if (
+                any([_lang in ('ch_sim', 'ch_tra') for _lang in self.languages])
+                and len(line_boxes) > 1
+                and line_boxes[-1]['type'] == 'text'
+                and line_boxes[-2]['type'] != 'text'
+            ):
+                if line_boxes[-1]['text'].lower() == 'o':
+                    line_boxes[-1]['text'] = '。'
+            if len(line_boxes) > 1:
+                # Remove redundant punctuation on the text/formula boundary
+                for _idx2, box in enumerate(line_boxes[1:]):
+                    if (
+                        box['type'] == 'text'
+                        and line_boxes[_idx2]['type'] == 'embedding'
+                    ):  # the current box is text and the previous box is an embedded formula
+                        if _match(line_boxes[_idx2]['text'][-1], box['text'][0]) and (
+                            not line_boxes[_idx2]['text'][:-1].endswith('\\')
+                            and not line_boxes[_idx2]['text'][:-1].endswith(r'\end')
+                        ):
+                            line_boxes[_idx2]['text'] = line_boxes[_idx2]['text'][:-1]
+                # Merge a trailing formula tag like "(2.1)" into the preceding isolated formula
+                for _idx2, box in enumerate(line_boxes[1:]):
+                    if (
+                        box['type'] == 'text'
+                        and line_boxes[_idx2]['type'] == 'isolated'
+                    ):  # the current box is text and the previous box is an isolated formula
+                        if y_overlap(line_boxes[_idx2], box,
+                                     key='position') > 0.9:
+                            if re.match(formula_tag, box['text']):
+                                # Strip the wrapping parentheses from the tag
+                                tag_text = box['text'][1:-1]
+                                line_boxes[_idx2]['text'] = line_boxes[_idx2][
+                                    'text'
+                                ] + ' \\tag{{{}}}'.format(tag_text)
+                                new_xmax = max(
+                                    line_boxes[_idx2]['position'][2][0],
+                                    box['position'][2][0],
+                                )
+                                line_boxes[_idx2]['position'][1][0] = line_boxes[_idx2][
+                                    'position'
+                                ][2][0] = new_xmax
+                                box['text'] = ''
+
+            outs[idx] = [box for box in line_boxes if box['text'].strip()]
+        return outs
+
+    @classmethod
+    def _split_line_image(cls, line_box, embed_mfs):
+        # Use the positions of the embedded formulas to split the single-line text
+        # image into several segments; each segment is then fed to the text OCR separately
+        line_box = line_box[0]
+        if not embed_mfs:
+            return [{'position': line_box.int().tolist(), 'type': 'text'}]
+        embed_mfs.sort(key=lambda x: x['position'][0])
+
+        outs = []
+        start = int(line_box[0])
+        xmax, ymin, ymax = int(line_box[2]), int(line_box[1]), int(line_box[-1])
+        for mf in embed_mfs:
+            _xmax = min(xmax, int(mf['position'][0]) + 1)
+            if start + 8 < _xmax:
+                outs.append({'position': [start, ymin, _xmax, ymax], 'type': 'text'})
+            start = int(mf['position'][2])
+            if _xmax >= xmax:
+                break
+        if start < xmax:
+            outs.append({'position': [start, ymin, xmax, ymax], 'type': 'text'})
+        return outs
+
+    def recognize_text(
+        self,
+        imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+        return_text: bool = True,
+        rec_config: Optional[dict] = None,
+        **kwargs,
+    ) -> Union[str, List[str], List[Any], List[List[Any]]]:
+        """
+        Recognize a pure Text Image.
+        Args:
+            imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+            return_text (bool): Whether to return only the recognized text; default value is `True`
+            rec_config (Optional[dict]): The config for recognition
+            kwargs (): Other parameters for `text_ocr.ocr()`
+
+        Returns: Text str or list of text strs when `return_text` is True;
+            `List[Any]` or `List[List[Any]]` when `return_text` is False, with the same length as `imgs` and the following keys:
+
+            * `position`: Position information of the block, `np.ndarray`, with a shape of [4, 2]
+            * `text`: The recognized text
+            * `score`: The confidence score [0, 1]; the higher, the more confident
+
+        """
+        is_single_image = False
+        if isinstance(imgs, (str, Path, Image.Image)):
+            imgs = [imgs]
+            is_single_image = True
+
+        input_imgs = prepare_imgs(imgs)
+
+        outs = []
+        for image in input_imgs:
+            result = self.text_ocr.ocr(np.array(image), rec_config=rec_config, **kwargs)
+            if return_text:
+                texts = [_one['text'] for _one in result]
+                result = '\n'.join(texts)
+            outs.append(result)
+
+        if kwargs.get('save_analysis_res'):
+            save_layout_img(
+                input_imgs[0], ['text'], outs[0], kwargs.get('save_analysis_res'),
+            )
+
+        if is_single_image:
+            return outs[0]
+        return outs
+
+    def recognize_formula(
+        self,
+        imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+        batch_size: int = 1,
+        return_text: bool = True,
+        rec_config: Optional[dict] = None,
+        **kwargs,
+    ) -> Union[str, List[str], Dict[str, Any], List[Dict[str, Any]]]:
+        """
+        Recognize pure Math Formula images to LaTeX Expressions
+        Args:
+            imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+            batch_size (int): The batch size
+            return_text (bool): Whether to return only the recognized text; default value is `True`
+            rec_config (Optional[dict]): The config for recognition
+            **kwargs (): Special model parameters. Not used for now
+
+        Returns: The LaTeX Expression or list of LaTeX Expressions;
+            str or List[str] when `return_text` is True;
+            Dict[str, Any] or List[Dict[str, Any]] when `return_text` is False, with the following keys:
+
+            * `text`: The recognized LaTeX text
+            * `score`: The confidence score [0, 1]; the higher, the more confident
+
+        """
+        if not self.enable_formula:
+            raise RuntimeError('Formula recognition is not enabled')
+        if self.latex_ocr is None:
+            raise RuntimeError('`latex_ocr` model MUST NOT be None')
+        outs = self.latex_ocr.recognize(
+            imgs, batch_size=batch_size, rec_config=rec_config, **kwargs
+        )
+        if return_text:
+            if isinstance(outs, dict):
+                outs = outs['text']
+            elif isinstance(outs, list):
+                outs = [one['text'] for one in outs]
+
+        return outs
+
+
+# A subclass of TextFormulaOCR implemented on top of a VLM (vision-language model)
+class VlmTextFormulaOCR(TextFormulaOCR):
+    def __init__(
+        self,
+        *,
+        vlm: Optional[Any] = None,
+        spellchecker: Optional[SpellChecker] = None,
+        **kwargs,
+    ):
+        """
+        Recognize text and formula from an image.
+        Args:
+            vlm (Optional[Any]): VLM model; defaults to `None`.
+            spellchecker (Optional[SpellChecker]): Spell Checker; defaults to `None`.
+            **kwargs (): not used for now.
+        """
+        if vlm is None:
+            raise ValueError('vlm must not be None')
+        self.vlm = vlm
+        self.spellchecker = spellchecker
+
+    @classmethod
+    def from_config(
+        cls,
+        total_configs: Optional[dict] = None,
+        enable_spell_checker: bool = True,
+        **kwargs,
+    ):
+        """
+        Args:
+            total_configs (dict): Configuration information for VlmTextFormulaOCR; defaults to `None`, which means using the default configuration. Usually the following keys are used:
+                * languages (str or Sequence[str]): The language code(s) of the text to be recognized; defaults to `('en', 'ch_sim')`.
+            enable_spell_checker (bool): Whether to enable the capability of Spell Checker; defaults to True.
+            **kwargs (): Reserved for other parameters:
+                * model_name (str): The name of the VLM model; defaults to `None`, which means using the default model.
+                * api_key (str): The API key for the VLM model; defaults to `None`, which means using the default API key.
+        """
+        from .vlm_api import Vlm
+
+        total_configs = total_configs or {}
+        # Combine configs with any additional kwargs
+        all_kwargs = kwargs.copy()
+        if total_configs:
+            all_kwargs.update(total_configs)
+
+        vlm = Vlm(
+            model_name=all_kwargs.pop("model_name", None),
+            api_key=all_kwargs.pop("api_key", None),
+        )
+
+        spellchecker = None
+        if enable_spell_checker:
+            languages = total_configs.get('languages', ('en', 'ch_sim'))
+            checker_languages = set(languages) & CHECKER_SUPPORTED_LANGUAGES
+            if checker_languages:
+                spellchecker = SpellChecker(language=checker_languages)
+
+        return cls(
+            vlm=vlm,
+            spellchecker=spellchecker,
+            **all_kwargs
+        )
+
+    def recognize(
+        self, img: Union[str, Path, Image.Image], return_text: bool = True, **kwargs
+    ) -> Union[str, List[Dict[str, Any]]]:
+        """
+        Recognize the text and formulas in the image by sending the whole image to the underlying VLM.
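+
+        A minimal usage sketch (the model name and API key are placeholders; a
+        reachable VLM backend is required)::
+
+            p2t = VlmTextFormulaOCR.from_config(
+                {'model_name': '<vlm-model-name>', 'api_key': '<your-api-key>'}
+            )
+            print(p2t.recognize('page.jpg'))  # the recognized text
+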
+        Args:
+            img (str or Image.Image): an image path, or `Image.Image` loaded by `Image.open()`
+            return_text (bool): Whether to return only the recognized text; default value is `True`
+            kwargs (): Other parameters for `vlm.__call__()`,
+                * `prompt`: The prompt for the VLM model
+
+        Returns: a str when `return_text` is `True`, or a list of ordered (top to bottom, left to right) dicts when `return_text` is `False`,
+            with each dict representing one detected box, containing keys:
+
+            * `type`: The category of the box; one of 'text', 'isolated', 'embedding'
+            * `text`: The recognized text or Latex formula
+            * `score`: The confidence score [0, 1]; the higher, the more confident
+            * `position`: Position information of the block, `np.ndarray`, with shape of [4, 2]
+            * `line_number`: The line number of the box (first line `line_number==0`), boxes with the same value indicate they are on the same line
+
+        """
+        if isinstance(img, Image.Image):
+            img0 = img.convert('RGB')
+        else:
+            img0 = read_img(img, return_type='Image')
+        w, h = img0.size
+        result = self.vlm(img_path=img0, auto_resize=True, **kwargs)
+        if return_text:
+            return result["text"]
+
+        result["type"] = "text"
+        result["position"] = np.array([[0, 0], [w-1, 0], [w-1, h-1], [0, h-1]])
+        result["line_number"] = 0
+        return [result]
+
+    def recognize_text(
+        self,
+        imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+        return_text: bool = True,
+        rec_config: Optional[dict] = None,
+        **kwargs,
+    ) -> Union[str, List[str], List[Any], List[List[Any]]]:
+        return self._recognize_batch(imgs, res_type='text', return_text=return_text, rec_config=rec_config)
+
+    def recognize_formula(
+        self,
+        imgs: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]],
+        batch_size: int = 1,
+        return_text: bool = True,
+        rec_config: Optional[dict] = None,
+        **kwargs,
+    ) -> Union[str, List[str], Dict[str, Any], List[Dict[str, Any]]]:
+        """
+        Recognize pure Math Formula images to LaTeX Expressions
+        Args:
+            imgs (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): The image or list of images
+            batch_size (int): The batch size; ignored here
+            return_text (bool): Whether to return only the recognized text; default value is `True`
+            rec_config (Optional[dict]): The config for recognition
+            **kwargs (): Special model parameters.
Not used for now + + Returns: The LaTeX Expression or list of LaTeX Expressions; + str or List[str] when `return_text` is True; + Dict[str, Any] or List[Dict[str, Any]] when `return_text` is False, with the following keys: + + * `text`: The recognized LaTeX text + * `score`: The confidence score [0, 1]; the higher, the more confident + + """ + return self._recognize_batch(imgs, res_type='formula', return_text=return_text, rec_config=rec_config) + + def _recognize_batch(self, imgs, *, res_type, return_text = True, rec_config = None): + rec_config = rec_config or {} + if isinstance(imgs, (str, Path, Image.Image)): + result = self.recognize(imgs, return_text, **rec_config) + if not return_text: + result = result[0] + return result + + results = self.vlm(imgs, **rec_config) + if return_text: + results = [one['text'] for one in results] + else: + for img, result in zip(imgs, results): + if isinstance(img, Image.Image): + w, h = img.size + else: + with read_img(img, return_type='Image') as img0: + w, h = img0.size + + result["type"] = res_type + result["position"] = np.array([[0, 0], [w-1, 0], [w-1, h-1], [0, h-1]]) + result["line_number"] = 0 + return results + + +if __name__ == '__main__': + from .utils import set_logger + + logger = set_logger(log_level='DEBUG') + + p2t = TextFormulaOCR() + img = 'docs/examples/english.jpg' + img = read_img(img, return_type='Image') + out = p2t.recognize(img) + logger.info(out) diff --git a/pix2text/utils.py b/pix2text/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..379bdc5f6ce42fc4dc0493b467dc651d095c3940 --- /dev/null +++ b/pix2text/utils.py @@ -0,0 +1,1527 @@ +# coding: utf-8 +# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix. +# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com). 
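+#
+# A small sketch of the box helpers defined below (coordinates are illustrative):
+#
+#   from pix2text.utils import list2box, y_overlap
+#
+#   b1 = {'position': list2box(0, 0, 100, 20)}
+#   b2 = {'position': list2box(0, 10, 80, 30)}
+#   y_overlap(b1, b2)  # -> 0.5: intersection height 10 / min box height 20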
+ +import hashlib +import os +import re +import shutil +import shlex +from copy import deepcopy +from functools import cmp_to_key +from pathlib import Path +import logging +import platform +import subprocess +from typing import Union, List, Any, Dict +from collections import Counter, defaultdict + +from PIL import Image, ImageOps +import numpy as np +from numpy import random +import torch +from torchvision.utils import save_image + +from .consts import MODEL_VERSION + +fmt = '[%(levelname)s %(asctime)s %(funcName)s:%(lineno)d] %(' 'message)s ' +logging.basicConfig(format=fmt) +logging.captureWarnings(True) +logger = logging.getLogger() + + +def set_logger(log_file=None, log_level=logging.INFO, log_file_level=logging.NOTSET): + """ + Example: + >>> set_logger(log_file) + >>> logger.info("abc'") + """ + log_format = logging.Formatter(fmt) + logger.setLevel(log_level) + console_handler = logging.StreamHandler() + console_handler.setFormatter(log_format) + logger.handlers = [console_handler] + if log_file and log_file != '': + if not Path(log_file).parent.exists(): + os.makedirs(Path(log_file).parent) + if isinstance(log_file, Path): + log_file = str(log_file) + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(log_file_level) + file_handler.setFormatter(log_format) + logger.addHandler(file_handler) + return logger + + +def custom_deepcopy(value): + if isinstance(value, dict): + return {key: custom_deepcopy(val) for key, val in value.items()} + elif isinstance(value, list): + return [custom_deepcopy(item) for item in value] + elif isinstance(value, tuple): + return tuple([custom_deepcopy(item) for item in value]) + elif isinstance(value, set): + return set([custom_deepcopy(item) for item in value]) + else: + try: + return deepcopy(value) + except TypeError: + return value # Return the original value if it cannot be deep copied + + +def select_device(device) -> str: + if isinstance(device, str) and device.lower() == "gpu": + device = "cuda" + if device is not None: + return device + + device = 'mps' if torch.backends.mps.is_available() else 'cpu' + if torch.cuda.is_available(): + device = 'cuda' + + return device + + +def data_dir_default(): + """ + + :return: default data directory depending on the platform and environment variables + """ + system = platform.system() + if system == 'Windows': + return os.path.join(os.environ.get('APPDATA'), 'pix2text') + else: + return os.path.join(os.path.expanduser("~"), '.pix2text') + + +def data_dir(): + """ + + :return: data directory in the filesystem for storage, for example when downloading models + """ + return os.getenv('PIX2TEXT_HOME', data_dir_default()) + + +def to_numpy(tensor: torch.Tensor) -> np.ndarray: + return ( + tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() + ) + + +def check_sha1(filename, sha1_hash): + """Check whether the sha1 hash of the file content matches the expected hash. + Parameters + ---------- + filename : str + Path to the file. + sha1_hash : str + Expected sha1 hash in hexadecimal digits. + Returns + ------- + bool + Whether the file content matches the expected hash. 
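+    Example
+    -------
+    Hypothetical file and truncated hash; only the common prefix is compared:
+        >>> check_sha1('models/mfd.onnx', '2fd4e1c6')  # doctest: +SKIP
+        True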
+ """ + sha1 = hashlib.sha1() + with open(filename, 'rb') as f: + while True: + data = f.read(1048576) + if not data: + break + sha1.update(data) + + sha1_file = sha1.hexdigest() + l = min(len(sha1_file), len(sha1_hash)) + return sha1.hexdigest()[0:l] == sha1_hash[0:l] + + +def read_tsv_file(fp, sep='\t', img_folder=None, mode='eval'): + img_fp_list, labels_list = [], [] + num_fields = 2 if mode != 'test' else 1 + with open(fp) as f: + for line in f: + fields = line.strip('\n').split(sep) + assert len(fields) == num_fields + img_fp = ( + os.path.join(img_folder, fields[0]) + if img_folder is not None + else fields[0] + ) + img_fp_list.append(img_fp) + + if mode != 'test': + labels = fields[1].split(' ') + labels_list.append(labels) + + return (img_fp_list, labels_list) if mode != 'test' else (img_fp_list, None) + + +def get_average_color(img): + # Convert image to numpy array + img_array = np.array(img) + + # Check if image is grayscale (2D) or has channels (3D) + if len(img_array.shape) < 3: + # Grayscale image (single channel) + avg_value = img_array.mean() + return (int(avg_value),) * 3 + + # Get average color, ignoring fully transparent pixels + if img_array.shape[2] == 4: # RGBA + alpha = img_array[:,:,3] + rgb = img_array[:,:,:3] + mask = alpha > 0 + if mask.any(): + avg_color = rgb[mask].mean(axis=0) + else: + avg_color = rgb.mean(axis=(0,1)) + else: # RGB or other format + channels = img_array.shape[2] + if channels == 1: # Single channel (like grayscale with dimension) + avg_value = img_array.mean() + return (int(avg_value),) * 3 + elif channels == 3: # RGB + avg_color = img_array.mean(axis=(0,1)) + else: # Other formats, use first 3 channels or pad + avg_color = img_array[:,:,:min(3, channels)].mean(axis=(0,1)) + # If less than 3 channels, duplicate the last one + if channels < 3: + avg_color = list(avg_color) + while len(avg_color) < 3: + avg_color.append(avg_color[-1]) + avg_color = np.array(avg_color) + + return tuple(map(int, avg_color)) + + +def get_contrasting_color(color): + return tuple(255 - c for c in color) + + +def convert_transparent_to_contrasting(img: Image.Image): + """ + Convert transparent pixels to a contrasting color. + """ + # Check if the image has an alpha channel + if img.mode in ('RGBA', 'LA'): + # Get average color of non-transparent pixels + avg_color = get_average_color(img) + + # Get contrasting color for background + bg_color = get_contrasting_color(avg_color) + + # Create a new background image with the contrasting color + # Add alpha channel (255) for RGBA format + rgba_bg_color = bg_color + (255,) + background = Image.new('RGBA', img.size, rgba_bg_color) + + # Paste the image on the background. 
+        # The alpha channel will be used as mask
+        background.paste(img, (0, 0), img)
+
+        # Convert to RGB (removes alpha channel)
+        return background.convert('RGB')
+    # Special handling for palette mode with transparency
+    elif img.mode == 'P' and 'transparency' in img.info:
+        # Convert P to RGBA first, which handles the transparency info properly
+        img_rgba = img.convert('RGBA')
+
+        # Get average color of non-transparent pixels
+        avg_color = get_average_color(img_rgba)
+
+        # Get contrasting color for background
+        bg_color = get_contrasting_color(avg_color)
+
+        # Create a new background image with the contrasting color
+        rgba_bg_color = bg_color + (255,)
+        background = Image.new('RGBA', img.size, rgba_bg_color)
+
+        # Paste the RGBA-converted image on the background
+        background.paste(img_rgba, (0, 0), img_rgba)
+
+        # Convert to RGB (removes alpha channel)
+        return background.convert('RGB')
+
+    return img.convert('RGB')
+
+
+def read_img(
+    path: Union[str, Path], return_type='Tensor'
+) -> Union[Image.Image, np.ndarray, torch.Tensor]:
+    """
+
+    Args:
+        path (str): image file path
+        return_type (str): the return type;
+            supports `Tensor` (returns torch.Tensor), `ndarray` (returns np.ndarray), and `Image` (returns `Image.Image`)
+
+    Returns: RGB Image.Image, or np.ndarray / torch.Tensor, with shape [Channel, Height, Width]
+    """
+    assert return_type in ('Tensor', 'ndarray', 'Image')
+    img = Image.open(path)
+    img = ImageOps.exif_transpose(img)  # apply the EXIF orientation (Pillow does not do this automatically)
+    img = convert_transparent_to_contrasting(img)
+    if return_type == 'Image':
+        return img
+    img = np.ascontiguousarray(np.array(img))
+    if return_type == 'ndarray':
+        return img
+    return torch.tensor(img.transpose((2, 0, 1)))
+
+
+def save_img(img: Union[torch.Tensor, np.ndarray], path):
+    if not isinstance(img, torch.Tensor):
+        img = torch.from_numpy(img)
+    img = (img - img.min()) / (img.max() - img.min() + 1e-6)
+    # img *= 255
+    # img = img.to(dtype=torch.uint8)
+    save_image(img, path)
+
+    # Image.fromarray(img).save(path)
+
+
+def get_background_color(image: Image.Image, margin=2):
+    width, height = image.size
+
+    # Sample pixels from the image borders
+    edge_pixels = []
+    for x in range(width):
+        for y in range(height):
+            if (
+                x <= margin
+                or y <= margin
+                or x >= width - margin
+                or y >= height - margin
+            ):
+                edge_pixels.append(image.getpixel((x, y)))
+
+    # Count the color frequencies of the border pixels
+    color_counter = Counter(edge_pixels)
+
+    # Take the most frequent color as the background color
+    background_color = color_counter.most_common(1)[0][0]
+
+    return background_color
+
+
+def add_img_margin(
+    image: Image.Image, left_right_margin, top_bottom_margin, background_color=None
+):
+    if background_color is None:
+        background_color = get_background_color(image)
+
+    # Original image size
+    width, height = image.size
+
+    # Size of the new image
+    new_width = width + left_right_margin * 2
+    new_height = height + top_bottom_margin * 2
+
+    # Create the new image filled with the background color
+    new_image = Image.new("RGB", (new_width, new_height), background_color)
+
+    # Paste the original image at the center of the new image
+    new_image.paste(image, (left_right_margin, top_bottom_margin))
+
+    return new_image
+
+
+def prepare_imgs(imgs: List[Union[str, Path, Image.Image]]) -> List[Image.Image]:
+    output_imgs = []
+    for img in imgs:
+        if isinstance(img, (str, Path)):
+            img = read_img(img, return_type='Image')
+        elif isinstance(img, Image.Image):
+            img = img.convert('RGB')
+        else:
+            raise ValueError(f'Unsupported image type: {type(img)}')
+        output_imgs.append(img)
+
+    return output_imgs
+
+
+COLOR_LIST = [
+    [0, 140, 255],    # dark orange
+    [127, 255, 0],    # spring green
+    [255, 144, 30],   # dodger blue
+    [180, 105, 255],  # pink
+    [128, 0, 128],    # purple
+    [0, 255, 255],    # yellow
+    [255, 191, 0],    # deep sky blue
+    [50, 205, 50],    # lime green
+    [60, 20, 220],    # crimson
+    [130, 0, 75],     # indigo
+    [255, 0, 0],      # red
+    [0, 255, 0],      # green
+    [0, 0, 255],      # blue
+]
+
+
+def save_layout_img(img0, categories, one_out, save_path, key='position'):
+    """Visualize the layout analysis results."""
+    import cv2
+    from cnstd.yolov7.plots import plot_one_box
+
+    if isinstance(img0, Image.Image):
+        img0 = cv2.cvtColor(np.asarray(img0.convert('RGB')), cv2.COLOR_RGB2BGR)
+
+    if len(categories) > 13:
+        colors = [[random.randint(0, 255) for _ in range(3)] for _ in categories]
+    else:
+        colors = COLOR_LIST
+    for one_box in one_out:
+        _type = one_box.get('type', 'text')
+        box = one_box[key]
+        xyxy = [box[0, 0], box[0, 1], box[2, 0], box[2, 1]]
+        label = str(_type)
+        if 'score' in one_box:
+            label += f', Score: {one_box["score"]:.2f}'
+        if 'col_number' in one_box:
+            label += f', Col: {one_box["col_number"]}'
+        plot_one_box(
+            xyxy,
+            img0,
+            label=label,
+            color=colors[categories.index(_type)],
+            line_thickness=1,
+        )
+
+    cv2.imwrite(str(save_path), img0)
+    logger.info(f"The image with the result is saved in: {save_path}")
+
+
+def rotated_box_to_horizontal(box):
+    """Convert a rotated box to a horizontal rectangle.
+
+    :param box: [4, 2]; coordinates of the top-left, top-right, bottom-right and bottom-left corners
+    """
+    xmin = min(box[:, 0])
+    xmax = max(box[:, 0])
+    ymin = min(box[:, 1])
+    ymax = max(box[:, 1])
+    return np.array([[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]])
+
+
+def is_valid_box(box, min_height=8, min_width=2) -> bool:
+    """Check whether a box is valid.
+    :param box: [4, 2]; coordinates of the top-left, top-right, bottom-right and bottom-left corners
+    :param min_height: minimum height
+    :param min_width: minimum width
+    :return: bool, whether the box is valid
+    """
+    return (
+        box[0, 0] + min_width <= box[1, 0]
+        and box[1, 1] + min_height <= box[2, 1]
+        and box[2, 0] >= box[3, 0] + min_width
+        and box[3, 1] >= box[0, 1] + min_height
+    )
+
+
+def list2box(xmin, ymin, xmax, ymax, dtype=float):
+    return np.array(
+        [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]], dtype=dtype
+    )
+
+
+def box2list(bbox):
+    return [int(bbox[0, 0]), int(bbox[0, 1]), int(bbox[2, 0]), int(bbox[2, 1])]
+
+
+def clipbox(box, img_height, img_width):
+    new_box = np.zeros_like(box)
+    new_box[:, 0] = np.clip(box[:, 0], 0, img_width - 1)
+    new_box[:, 1] = np.clip(box[:, 1], 0, img_height - 1)
+    return new_box
+
+
+def y_overlap(box1, box2, key='position'):
+    # Compute the IoU on the y axis: intersection / min(height1, height2)
+    if key:
+        box1 = [box1[key][0][0], box1[key][0][1], box1[key][2][0], box1[key][2][1]]
+        box2 = [box2[key][0][0], box2[key][0][1], box2[key][2][0], box2[key][2][1]]
+    else:
+        box1 = [box1[0][0], box1[0][1], box1[2][0], box1[2][1]]
+        box2 = [box2[0][0], box2[0][1], box2[2][0], box2[2][1]]
+    # Check whether the boxes intersect at all
+    if box1[3] <= box2[1] or box2[3] <= box1[1]:
+        return 0
+    # Height of the intersection
+    y_min = max(box1[1], box2[1])
+    y_max = min(box1[3], box2[3])
+    return (y_max - y_min) / max(1, min(box1[3] - box1[1], box2[3] - box2[1]))
+
+
+def x_overlap(box1, box2, key='position'):
+    # Compute the IoU on the x axis: intersection / min(width1, width2)
+    if key:
+        box1 = [box1[key][0][0], box1[key][0][1], box1[key][2][0], box1[key][2][1]]
+        box2 = [box2[key][0][0], box2[key][0][1], box2[key][2][0], box2[key][2][1]]
+    else:
+        box1 = [box1[0][0], box1[0][1], box1[2][0], box1[2][1]]
+        box2 = [box2[0][0], box2[0][1], box2[2][0], box2[2][1]]
+    # Check whether the boxes intersect at all
+    if box1[2] <= box2[0] or box2[2] <= box1[0]:
+        return 0
+    # Width of the intersection
+    x_min = max(box1[0], box2[0])
+    x_max = min(box1[2], box2[2])
+    return (x_max - x_min) / max(1, min(box1[2] - box1[0], box2[2] - box2[0]))
+
+
+def overlap(box1, box2, key='position'):
+    return x_overlap(box1, box2, key) * y_overlap(box1, box2, key)
+
+
+def get_same_line_boxes(anchor,
total_boxes): + line_boxes = [anchor] + for box in total_boxes: + if box['line_number'] >= 0: + continue + if max([y_overlap(box, l_box) for l_box in line_boxes]) > 0.1: + line_boxes.append(box) + return line_boxes + + +def _compare_box(box1, box2, anchor, key, left_best: bool = True): + over1 = y_overlap(box1, anchor, key) + over2 = y_overlap(box2, anchor, key) + if box1[key][2, 0] < box2[key][0, 0] - 3: + return -1 + elif box2[key][2, 0] < box1[key][0, 0] - 3: + return 1 + else: + if max(over1, over2) >= 3 * min(over1, over2): + return over2 - over1 if left_best else over1 - over2 + return box1[key][0, 0] - box2[key][0, 0] + + +def sort_and_filter_line_boxes(line_boxes, key): + if len(line_boxes) <= 1: + return line_boxes + + allowed_max_overlay_x = 20 + + def find_right_box(anchor): + anchor_width = anchor[key][2, 0] - anchor[key][0, 0] + allowed_max = min( + max(allowed_max_overlay_x, anchor_width * 0.5), anchor_width * 0.95 + ) + right_boxes = [ + l_box + for l_box in line_boxes[1:] + if l_box['line_number'] < 0 + and l_box[key][0, 0] >= anchor[key][2, 0] - allowed_max + ] + if not right_boxes: + return None + right_boxes = sorted( + right_boxes, + key=cmp_to_key( + lambda x, y: _compare_box(x, y, anchor, key, left_best=True) + ), + ) + return right_boxes[0] + + def find_left_box(anchor): + anchor_width = anchor[key][2, 0] - anchor[key][0, 0] + allowed_max = min( + max(allowed_max_overlay_x, anchor_width * 0.5), anchor_width * 0.95 + ) + left_boxes = [ + l_box + for l_box in line_boxes[1:] + if l_box['line_number'] < 0 + and l_box[key][2, 0] <= anchor[key][0, 0] + allowed_max + ] + if not left_boxes: + return None + left_boxes = sorted( + left_boxes, + key=cmp_to_key( + lambda x, y: _compare_box(x, y, anchor, key, left_best=False) + ), + ) + return left_boxes[-1] + + res_boxes = [line_boxes[0]] + anchor = res_boxes[0] + line_number = anchor['line_number'] + + while True: + right_box = find_right_box(anchor) + if right_box is None: + break + right_box['line_number'] = line_number + res_boxes.append(right_box) + anchor = right_box + + anchor = res_boxes[0] + while True: + left_box = find_left_box(anchor) + if left_box is None: + break + left_box['line_number'] = line_number + res_boxes.insert(0, left_box) + anchor = left_box + + return res_boxes + + +def merge_boxes(bbox1, bbox2): + """ + Merge two bounding boxes to get a bounding box that encompasses both. + + Parameters: + - bbox1, bbox2: The bounding boxes to merge. 
Each box is np.ndarray, with shape of [4, 2]
+
+    Returns: new merged box, with shape of [4, 2]
+    """
+    # Unpack the coordinates of the two boxes
+    x_min1, y_min1, x_max1, y_max1 = box2list(bbox1)
+    x_min2, y_min2, x_max2, y_max2 = box2list(bbox2)
+
+    # Coordinates of the merged box
+    x_min = min(x_min1, x_min2)
+    y_min = min(y_min1, y_min2)
+    x_max = max(x_max1, x_max2)
+    y_max = max(y_max1, y_max2)
+
+    # Return the merged box
+    return list2box(x_min, y_min, x_max, y_max)
+
+
+def sort_boxes(boxes: List[dict], key='position') -> List[List[dict]]:
+    # Sort all boxes by their y coordinate
+    boxes.sort(key=lambda box: box[key][0, 1])
+    for box in boxes:
+        box['line_number'] = -1  # line number; -1 means not assigned yet
+
+    def get_anchor():
+        anchor = None
+        for box in boxes:
+            if box['line_number'] == -1:
+                anchor = box
+                break
+        return anchor
+
+    lines = []
+    while True:
+        anchor = get_anchor()
+        if anchor is None:
+            break
+        anchor['line_number'] = len(lines)
+        line_boxes = get_same_line_boxes(anchor, boxes)
+        line_boxes = sort_and_filter_line_boxes(line_boxes, key)
+        lines.append(line_boxes)
+
+    return lines
+
+
+def merge_adjacent_bboxes(line_bboxes):
+    """
+    Merge adjacent, close-enough bounding boxes within the same line.
+    If the horizontal distance between two bboxes is smaller than the line height,
+    they are merged into a single bbox.
+
+    :param line_bboxes: list of bbox dicts, each containing the line number,
+        the position (coordinates of the four corners) and the type.
+    :return: list of merged bboxes.
+    """
+    merged_bboxes = []
+    current_bbox = None
+
+    for bbox in line_bboxes:
+        # The first bbox of the current line, or not on the same line as the previous bbox
+        if current_bbox is None:
+            current_bbox = bbox
+            continue
+
+        line_number = bbox['line_number']
+        position = bbox['position']
+        bbox_type = bbox['type']
+
+        # Height of the bbox
+        height = position[2, 1] - position[0, 1]
+
+        # Distance between the current bbox and the previous one
+        distance = position[0, 0] - current_bbox['position'][1, 0]
+        if (
+            current_bbox['type'] == 'text'
+            and bbox_type == 'text'
+            and distance <= height
+        ):
+            # Merge the bboxes: take the smaller ymin and the larger ymax
+            # [text]_[text] -> [text_text]
+            ymin = min(position[0, 1], current_bbox['position'][0, 1])
+            ymax = max(position[2, 1], current_bbox['position'][2, 1])
+            xmin = current_bbox['position'][0, 0]
+            xmax = position[2, 0]
+            current_bbox['position'] = list2box(xmin, ymin, xmax, ymax)
+        else:
+            if (
+                current_bbox['type'] == 'text'
+                and bbox_type != 'text'
+                and 0 < distance <= height
+            ):
+                # [text]_[embedding] -> [text_][embedding]
+                current_bbox['position'][1, 0] = position[0, 0]
+                current_bbox['position'][2, 0] = position[0, 0]
+            elif (
+                current_bbox['type'] != 'text'
+                and bbox_type == 'text'
+                and 0 < distance <= height
+            ):
+                # [embedding]_[text] -> [embedding][_text]
+                position[0, 0] = current_bbox['position'][1, 0]
+                position[3, 0] = current_bbox['position'][1, 0]
+            # Keep the finished bbox and start a new merge group
+            merged_bboxes.append(current_bbox)
+            current_bbox = bbox
+
+    if current_bbox is not None:
+        merged_bboxes.append(current_bbox)
+
+    return merged_bboxes
+
+
+def adjust_line_height(bboxes, img_height, max_expand_ratio=0.2):
+    """
+    Slightly increase the height of each box based on the gaps between adjacent
+    lines (detected boxes can sit very close to the text).
+    Args:
+        bboxes (List[List[dict]]): list of bbox dicts, each containing the line number,
+            the position (coordinates of the four corners) and the type.
+        img_height (int): height of the original image.
+        max_expand_ratio (float): maximum expansion ratio above and below, relative to the box height
+
+    Returns:
+
+    """
+
+    def get_max_text_ymax(line_bboxes):
+        return max([bbox['position'][2, 1] for bbox in line_bboxes])
+
+    def get_min_text_ymin(line_bboxes):
+        return min([bbox['position'][0, 1] for bbox in line_bboxes])
+
+    if len(bboxes) < 1:
+        return bboxes
+
+    for line_idx, line_bboxes in enumerate(bboxes):
+        next_line_ymin = (
+            get_min_text_ymin(bboxes[line_idx + 1])
+            if line_idx < len(bboxes) - 1
+            else img_height
+        )
+        above_line_ymax = get_max_text_ymax(bboxes[line_idx - 1]) if line_idx > 0 else 0
+        for box in line_bboxes:
+            if box['type'] != 'text':
+                continue
+            box_height = box['position'][2, 1] - box['position'][0, 1]
+            if box['position'][0, 1] > above_line_ymax:
+                expand_size = min(
+                    (box['position'][0, 1] - above_line_ymax) // 3,
+                    int(max_expand_ratio * box_height),
+                )
+                box['position'][0, 1] -= expand_size
+                box['position'][1, 1] -= expand_size
+            if box['position'][2, 1] < next_line_ymin:
+                expand_size = min(
+                    (next_line_ymin - box['position'][2, 1]) // 3,
+                    int(max_expand_ratio * box_height),
+                )
+                box['position'][2, 1] += expand_size
+                box['position'][3, 1] += expand_size
+    return bboxes
+
+
+def adjust_line_width(
+    text_box_infos, formula_box_infos, img_width, max_expand_ratio=0.2
+):
+    """
+    Expand each text box slightly to the left and right if it does not overlap
+    other boxes (a detected text box may cut off part of the characters on its borders).
+    Args:
+        text_box_infos (List[dict]): text box info; the 'position' field contains the coordinates of the four corners.
+        formula_box_infos (List[dict]): formula box info; the 'position' field contains the coordinates of the four corners.
+        img_width (int): width of the original image.
+        max_expand_ratio (float): maximum expansion ratio to the left and right, relative to the box height.
+
+    Returns: the expanded text_box_infos.
+    """
+
+    def _expand_left_right(box):
+        expanded_box = box.copy()
+        xmin, xmax = box[0, 0], box[2, 0]
+        box_height = box[2, 1] - box[0, 1]
+        expand_size = int(max_expand_ratio * box_height)
+        expanded_box[3, 0] = expanded_box[0, 0] = max(xmin - expand_size, 0)
+        expanded_box[2, 0] = expanded_box[1, 0] = min(xmax + expand_size, img_width - 1)
+        return expanded_box
+
+    def _is_adjacent(anchor_box, text_box):
+        if overlap(anchor_box, text_box, key=None) < 1e-6:
+            return False
+        anchor_xmin, anchor_xmax = anchor_box[0, 0], anchor_box[2, 0]
+        text_xmin, text_xmax = text_box[0, 0], text_box[2, 0]
+        if (
+            text_xmin < anchor_xmin < text_xmax < anchor_xmax
+            or anchor_xmin < text_xmin < anchor_xmax < text_xmax
+        ):
+            return True
+        return False
+
+    for idx, text_box in enumerate(text_box_infos):
+        expanded_box = _expand_left_right(text_box['position'])
+        overlapped = False
+        cand_boxes = [
+            _text_box['position']
+            for _idx, _text_box in enumerate(text_box_infos)
+            if _idx != idx
+        ]
+        cand_boxes.extend(
+            [_formula_box['position'] for _formula_box in formula_box_infos]
+        )
+        for cand_box in cand_boxes:
+            if _is_adjacent(expanded_box, cand_box):
+                overlapped = True
+                break
+        if not overlapped:
+            text_box_infos[idx]['position'] = expanded_box
+
+    return text_box_infos
+
+
+def crop_box(text_box, formula_box, min_crop_width=2) -> List[np.ndarray]:
+    """
+    Crop away the parts of text_box that intersect with formula_box.
+    Args:
+        text_box ():
+        formula_box ():
+        min_crop_width (int): minimum width for a cropped text box to be kept;
+            cropped text boxes narrower than this are dropped.
+
+    Returns:
+
+    """
+    text_xmin, text_xmax = text_box[0, 0], text_box[2, 0]
+    text_ymin, text_ymax = text_box[0, 1], text_box[2, 1]
+    formula_xmin, formula_xmax = formula_box[0, 0], formula_box[2, 0]
+
+    cropped_boxes = []
+    if text_xmin < formula_xmin:
+        new_text_xmax = min(text_xmax, formula_xmin)
+        if new_text_xmax - text_xmin >= min_crop_width:
+            cropped_boxes.append((text_xmin, text_ymin, new_text_xmax, text_ymax))
+
+    if text_xmax > formula_xmax:
+        new_text_xmin = max(text_xmin, formula_xmax)
+        if text_xmax - new_text_xmin >= min_crop_width:
+            cropped_boxes.append((new_text_xmin, text_ymin, text_xmax, text_ymax))
+
+    return [list2box(*box, dtype=None) for box in cropped_boxes]
+
+
+def remove_overlap_text_bbox(text_box_infos, formula_box_infos):
+    """
+    Crop a text box if it intersects with a formula box.
+    Args:
+        text_box_infos ():
+        formula_box_infos ():
+
+    Returns:
+
+    """
+
+    new_text_box_infos = []
+    for idx, text_box in enumerate(text_box_infos):
+        max_overlap_val = 0
+        max_overlap_fbox = None
+
+        for formula_box in formula_box_infos:
formula_box in formula_box_infos: + cur_val = overlap(text_box['position'], formula_box['position'], key=None) + if cur_val > max_overlap_val: + max_overlap_val = cur_val + max_overlap_fbox = formula_box + + if max_overlap_val < 0.1: # overlap 太少的情况不做任何处理 + new_text_box_infos.append(text_box) + continue + # if max_overlap_val > 0.8: # overlap 太多的情况,直接扔掉 text box + # continue + + cropped_text_boxes = crop_box( + text_box['position'], max_overlap_fbox['position'] + ) + if cropped_text_boxes: + for _box in cropped_text_boxes: + new_box = deepcopy(text_box) + new_box['position'] = _box + new_text_box_infos.append(new_box) + + return new_text_box_infos + + +def is_chinese(ch): + """ + 判断一个字符是否为中文字符 + """ + return '\u4e00' <= ch <= '\u9fff' + + +def find_first_punctuation_position(text): + # 匹配常见标点符号的正则表达式 + pattern = re.compile(r'[,.!?;:()\[\]{}\'\"\\/-]') + match = pattern.search(text) + if match: + return match.start() + else: + return len(text) + + +def smart_join(str_list, spellchecker=None): + """ + 对字符串列表进行拼接,如果相邻的两个字符串都是中文或包含空白符号,则不加空格;其他情况则加空格 + """ + + def contain_whitespace(s): + if re.search(r'\s', s): + return True + else: + return False + + str_list = [s for s in str_list if s] + if not str_list: + return '' + res = str_list[0] + for i in range(1, len(str_list)): + if (is_chinese(res[-1]) and is_chinese(str_list[i][0])) or contain_whitespace( + res[-1] + str_list[i][0] + ): + res += str_list[i] + elif spellchecker is not None and res.endswith('-'): + fields = res.rsplit(' ', maxsplit=1) + if len(fields) > 1: + new_res, prev_word = fields[0], fields[1] + else: + new_res, prev_word = '', res + + fields = str_list[i].split(' ', maxsplit=1) + if len(fields) > 1: + next_word, new_next = fields[0], fields[1] + else: + next_word, new_next = str_list[i], '' + + punct_idx = find_first_punctuation_position(next_word) + next_word = next_word[:punct_idx] + new_next = str_list[i][len(next_word) :] + new_word = prev_word[:-1] + next_word + if ( + next_word + and spellchecker.unknown([prev_word + next_word]) + and spellchecker.known([new_word]) + ): + res = new_res + ' ' + new_word + new_next + else: + new_word = prev_word + next_word + res = new_res + ' ' + new_word + new_next + else: + res += ' ' + str_list[i] + return res + + +def cal_block_xmin_xmax(lines, indentation_thrsh): + total_min_x, total_max_x = min(lines[:, 0]), max(lines[:, 1]) + if lines.shape[0] < 2: + return total_min_x, total_max_x + + min_x, max_x = min(lines[1:, 0]), max(lines[1:, 1]) + first_line_is_full = total_max_x > max_x - indentation_thrsh + if first_line_is_full: + return min_x, total_max_x + + return total_min_x, total_max_x + + +def merge_line_texts( + outs: List[Dict[str, Any]], + auto_line_break: bool = True, + line_sep='\n', + embed_sep=(' $', '$ '), + isolated_sep=('$$\n', '\n$$'), + spellchecker=None, +) -> str: + """ + 把 Pix2Text.recognize_by_mfd() 的返回结果,合并成单个字符串 + Args: + outs (List[Dict[str, Any]]): + auto_line_break: 基于box位置自动判断是否该换行 + line_sep: 行与行之间的分隔符 + embed_sep (tuple): Prefix and suffix for embedding latex; default value is `(' $', '$ ')` + isolated_sep (tuple): Prefix and suffix for isolated latex; default value is `('$$\n', '\n$$')` + spellchecker: Spell Checker + + Returns: 合并后的字符串 + + """ + if not outs: + return '' + out_texts = [] + line_margin_list = [] # 每行的最左边和最右边的x坐标 + isolated_included = [] # 每行是否包含了 `isolated` 类型的数学公式 + line_height_dict = defaultdict(list) # 每行中每个块对应的高度 + line_ymin_ymax_list = [] # 每行的最上边和最下边的y坐标 + for _out in outs: + line_number = _out.get('line_number', 0) + while 
len(out_texts) <= line_number: + out_texts.append([]) + line_margin_list.append([100000, 0]) + isolated_included.append(False) + line_ymin_ymax_list.append([100000, 0]) + cur_text = _out['text'] + cur_type = _out.get('type', 'text') + box = _out['position'] + if cur_type in ('embedding', 'isolated'): + sep = isolated_sep if _out['type'] == 'isolated' else embed_sep + cur_text = sep[0] + cur_text + sep[1] + if cur_type == 'isolated': + isolated_included[line_number] = True + cur_text = line_sep + cur_text + line_sep + out_texts[line_number].append(cur_text) + line_margin_list[line_number][1] = max( + line_margin_list[line_number][1], float(box[2, 0]) + ) + line_margin_list[line_number][0] = min( + line_margin_list[line_number][0], float(box[0, 0]) + ) + if cur_type == 'text': + line_height_dict[line_number].append(box[2, 1] - box[1, 1]) + line_ymin_ymax_list[line_number][0] = min( + line_ymin_ymax_list[line_number][0], float(box[0, 1]) + ) + line_ymin_ymax_list[line_number][1] = max( + line_ymin_ymax_list[line_number][1], float(box[2, 1]) + ) + + line_text_list = [smart_join(o) for o in out_texts] + + for _line_number in line_height_dict.keys(): + if line_height_dict[_line_number]: + line_height_dict[_line_number] = np.mean(line_height_dict[_line_number]) + _line_heights = list(line_height_dict.values()) + mean_height = np.mean(_line_heights) if _line_heights else None + + default_res = re.sub(rf'{line_sep}+', line_sep, line_sep.join(line_text_list)) + if not auto_line_break: + return default_res + + line_lengths = [rx - lx for lx, rx in line_margin_list] + line_length_thrsh = max(line_lengths) * 0.3 + if line_length_thrsh < 1: + return default_res + + lines = np.array( + [ + margin + for idx, margin in enumerate(line_margin_list) + if isolated_included[idx] or line_lengths[idx] >= line_length_thrsh + ] + ) + if lines.shape[0] < 1: + return default_res + min_x, max_x = min(lines[:, 0]), max(lines[:, 1]) + + indentation_thrsh = (max_x - min_x) * 0.1 + if mean_height is not None: + indentation_thrsh = 1.5 * mean_height + + min_x, max_x = cal_block_xmin_xmax(lines, indentation_thrsh) + + res_line_texts = [''] * len(line_text_list) + line_text_list = [(idx, txt) for idx, txt in enumerate(line_text_list) if txt] + for idx, (line_number, txt) in enumerate(line_text_list): + if isolated_included[line_number]: + res_line_texts[line_number] = line_sep + txt + line_sep + continue + + tmp = txt + if line_margin_list[line_number][0] > min_x + indentation_thrsh: + tmp = line_sep + txt + if line_margin_list[line_number][1] < max_x - indentation_thrsh: + tmp = tmp + line_sep + if idx < len(line_text_list) - 1: + cur_height = line_ymin_ymax_list[line_number][1] - line_ymin_ymax_list[line_number][0] + next_line_number = line_text_list[idx + 1][0] + if ( + cur_height > 0 + and line_ymin_ymax_list[next_line_number][0] < line_ymin_ymax_list[next_line_number][1] + and line_ymin_ymax_list[next_line_number][0] - line_ymin_ymax_list[line_number][1] + > cur_height + ): # 当前行与下一行的间距超过了一行的行高,则认为它们之间应该是不同的段落 + tmp = tmp + line_sep + res_line_texts[idx] = tmp + + outs = smart_join([c for c in res_line_texts if c], spellchecker) + return re.sub(rf'{line_sep}+', line_sep, outs) # 把多个 '\n' 替换为 '\n' + + +def run_hf_download_cmd(remote_repo, model_dir, env=None): + """ + 统一在不同平台下执行 huggingface-cli 下载命令。 + Args: + remote_repo: huggingface 仓库名 + model_dir: 下载到的本地目录 + env: 可选,传递给 subprocess 的环境变量 + """ + if platform.system() == 'Windows': + download_cmd = [ + 'huggingface-cli', 'download', '--repo-type', 'model', + 
'--resume-download', '--local-dir-use-symlinks', 'False', + remote_repo, '--local-dir', str(model_dir) + ] + subprocess.run(download_cmd, env=env, shell=False) + else: + download_cmd = f'huggingface-cli download --repo-type model --resume-download --local-dir-use-symlinks False {remote_repo} --local-dir {shlex.quote(str(model_dir))}' + subprocess.run(download_cmd, env=env, shell=True) + + +def prepare_model_files(root, model_info, mirror_url='https://hf-mirror.com') -> Path: + model_root_dir = Path(root) / MODEL_VERSION + model_dir = model_root_dir / model_info['local_model_id'] + if model_dir.is_dir() and list(model_dir.glob('**/[!.]*')): + return model_dir + assert 'hf_model_id' in model_info + model_dir.mkdir(parents=True) + run_hf_download_cmd(model_info["hf_model_id"], model_dir) + # 如果当前目录下无文件,就从huggingface上下载 + if not list(model_dir.glob('**/[!.]*')): + if model_dir.exists(): + shutil.rmtree(str(model_dir)) + env = os.environ.copy() + env['HF_ENDPOINT'] = mirror_url + run_hf_download_cmd(model_info["hf_model_id"], model_dir, env=env) + return model_dir + + +def prepare_model_files2(model_fp_or_dir, remote_repo, file_or_dir='file', mirror_url='https://hf-mirror.com'): + """ + 从远程指定的仓库下载模型文件。 + Args: + model_fp_or_dir: 下载的模型文件会保存到此路径 + remote_repo: 指定的远程仓库 + file_or_dir: model_fp_or_dir 是文件路径还是目录路径。注:下载的都是目录 + mirror_url: 指定的 HuggingFace 国内镜像网址;如果无法从 HuggingFace 官方仓库下载,会自动从此国内镜像下载。默认值为 'https://hf-mirror.com' + """ + model_fp_or_dir = Path(model_fp_or_dir) + if file_or_dir == 'file': + if model_fp_or_dir.exists(): + return model_fp_or_dir + model_dir = model_fp_or_dir.parent + else: + model_dir = model_fp_or_dir + if model_dir.exists(): + shutil.rmtree(str(model_dir)) + model_dir.mkdir(parents=True) + run_hf_download_cmd(remote_repo, model_dir) + download_status = False + if file_or_dir == 'file': + if model_fp_or_dir.exists(): # download failed above + download_status = True + else: # model_dir 存在且非空,则下载成功 + if model_dir.exists() and list(model_dir.glob('**/[!.]*')): + download_status = True + if not download_status: # download failed above + if model_dir.exists(): + shutil.rmtree(str(model_dir)) + env = os.environ.copy() + env['HF_ENDPOINT'] = mirror_url + run_hf_download_cmd(remote_repo, model_dir, env=env) + return model_fp_or_dir + + +def calculate_cer(predicted_text: str, ground_truth_text: str) -> float: + """ + Calculate Character Error Rate (CER) between predicted text and ground truth text. + + Uses torchmetrics implementation for accurate CER calculation. 
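+ If torchmetrics is not installed, falls back to a difflib.SequenceMatcher-based count of substitutions, deletions, and insertions.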
+ + Args: + predicted_text: The predicted text string + ground_truth_text: The ground truth text string + + Returns: + float: Character Error Rate (0.0 = perfect match, higher values = more errors) + """ + try: + from torchmetrics.text import CharErrorRate + + # Initialize the CER metric + cer_metric = CharErrorRate() + + # Calculate CER + cer = cer_metric(predicted_text, ground_truth_text) + + return float(cer.item()) + + except ImportError: + # Fallback to simple implementation if torchmetrics is not available + import difflib + + # Convert to lists of characters for comparison + pred_chars = list(predicted_text) + gt_chars = list(ground_truth_text) + + # Use difflib to get the differences + matcher = difflib.SequenceMatcher(None, gt_chars, pred_chars) + + # Count operations + substitutions = 0 + deletions = 0 + insertions = 0 + + for tag, i1, i2, j1, j2 in matcher.get_opcodes(): + if tag == 'replace': + # Count substitutions (replacements) + substitutions += max(i2 - i1, j2 - j1) + elif tag == 'delete': + # Count deletions + deletions += i2 - i1 + elif tag == 'insert': + # Count insertions + insertions += j2 - j1 + # 'equal' operations don't count as errors + + # Calculate total errors + total_errors = substitutions + deletions + insertions + + # Calculate CER + if len(gt_chars) == 0: + # If ground truth is empty, CER is 1.0 if prediction is not empty, 0.0 otherwise + return 1.0 if len(pred_chars) > 0 else 0.0 + + cer = total_errors / len(gt_chars) + return cer + + +def calculate_cer_batch(predictions: List[str], ground_truths: List[str]) -> Dict[str, float]: + """ + Calculate CER for a batch of predictions and ground truths. + + Uses torchmetrics for efficient batch processing when available. + + Args: + predictions: List of predicted text strings + ground_truths: List of ground truth text strings + + Returns: + dict: Dictionary containing average CER and individual CERs + """ + if len(predictions) != len(ground_truths): + raise ValueError("Number of predictions must equal number of ground truths") + + try: + from torchmetrics.text import CharErrorRate + + # Initialize the CER metric + cer_metric = CharErrorRate() + + # Calculate CER for the entire batch + batch_cer = cer_metric(predictions, ground_truths) + + # Calculate individual CERs + cers = [] + for pred, gt in zip(predictions, ground_truths): + individual_metric = CharErrorRate() + cer = individual_metric([pred], [gt]) + cers.append(float(cer.item())) + + return { + 'average_cer': float(batch_cer.item()), + 'individual_cers': cers, + 'total_samples': len(cers) + } + + except ImportError: + # Fallback to individual calculation if torchmetrics is not available + cers = [] + for pred, gt in zip(predictions, ground_truths): + cer = calculate_cer(pred, gt) + cers.append(cer) + + avg_cer = sum(cers) / len(cers) if cers else 0.0 + + return { + 'average_cer': avg_cer, + 'individual_cers': cers, + 'total_samples': len(cers) + } + + +def save_evaluation_results_to_excel_with_images( + results: List[Dict[str, Any]], + output_file: str, + img_path_key: str = 'img_path', + gt_key: str = 'ground_truth', + pred_key: str = 'prediction', + cer_key: str = 'cer', + prefix_img_dir: str = '', + max_img_width: int = 200, + max_img_height: int = 150 +) -> bool: + """ + Save evaluation results to Excel file with embedded images. 
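+ Requires the optional openpyxl and Pillow dependencies; prints an install hint and returns False when they are missing.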
+ + Args: + results: List of dictionaries containing evaluation results + output_file: Path to save the Excel file + img_path_key: Key name for image path in results + gt_key: Key name for ground truth text in results + pred_key: Key name for predicted text in results + cer_key: Key name for CER value in results + prefix_img_dir: Root directory to prepend to image paths + max_img_width: Maximum width for embedded images in pixels + max_img_height: Maximum height for embedded images in pixels + + Returns: + bool: True if successful, False otherwise + """ + try: + import openpyxl + from openpyxl.drawing.image import Image as XLImage + from openpyxl.utils import get_column_letter + from PIL import Image as PILImage + import io + + except ImportError as e: + print(f"Error: Required library not found: {e}") + print("Please install openpyxl: pip install openpyxl") + return False + + try: + # Create a new workbook and select the active sheet + wb = openpyxl.Workbook() + ws = wb.active + ws.title = "Evaluation Results" + + # Set up headers + headers = ['Image', 'Ground Truth', 'Prediction', 'CER'] + for col, header in enumerate(headers, 1): + ws.cell(row=1, column=col, value=header) + ws.cell(row=1, column=col).font = openpyxl.styles.Font(bold=True) + + # Set column widths + ws.column_dimensions['A'].width = 30 # Image column + ws.column_dimensions['B'].width = 50 # Ground Truth column + ws.column_dimensions['C'].width = 50 # Prediction column + ws.column_dimensions['D'].width = 15 # CER column + + # Process each result + for row_idx, result in enumerate(results, 2): + # Handle image path + img_path = result.get(img_path_key, '') + if prefix_img_dir and not os.path.isabs(img_path): + img_path = os.path.join(prefix_img_dir, img_path) + + # Add ground truth, prediction, and CER + ws.cell(row=row_idx, column=2, value=result.get(gt_key, '')) + ws.cell(row=row_idx, column=3, value=result.get(pred_key, '')) + ws.cell(row=row_idx, column=4, value=result.get(cer_key, 0.0)) + + # Try to embed image + if img_path and os.path.exists(img_path): + try: + # Open and resize image + with PILImage.open(img_path) as pil_img: + # Convert to RGB if necessary + if pil_img.mode in ('RGBA', 'LA', 'P'): + pil_img = pil_img.convert('RGB') + + # Calculate resize ratio to fit within max dimensions + width, height = pil_img.size + ratio = min(max_img_width / width, max_img_height / height, 1.0) + new_width = int(width * ratio) + new_height = int(height * ratio) + + if ratio < 1.0: + pil_img = pil_img.resize((new_width, new_height), PILImage.Resampling.LANCZOS) + + # Save to bytes + img_bytes = io.BytesIO() + pil_img.save(img_bytes, format='PNG') + img_bytes.seek(0) + + # Create openpyxl image + xl_img = XLImage(img_bytes) + xl_img.width = new_width + xl_img.height = new_height + + # Add image to cell + ws.add_image(xl_img, f'A{row_idx}') + + except Exception as e: + print(f"Warning: Could not embed image {img_path}: {e}") + ws.cell(row=row_idx, column=1, value=f"Image Error: {os.path.basename(img_path)}") + else: + ws.cell(row=row_idx, column=1, value=f"Not Found: {os.path.basename(img_path) if img_path else 'No path'}") + + # Save the workbook + wb.save(output_file) + print(f"Excel file with embedded images saved to: {output_file}") + return True + + except Exception as e: + print(f"Error saving Excel file: {e}") + return False + + +def create_html_report_with_images( + results: List[Dict[str, Any]], + output_file: str, + img_path_key: str = 'img_path', + gt_key: str = 'ground_truth', + pred_key: str = 'prediction', + 
cer_key: str = 'cer', + prefix_img_dir: str = '', + max_img_width: int = 200, + max_img_height: int = 150 +) -> bool: + """ + Create HTML report with embedded images as alternative to Excel. + + Args: + results: List of dictionaries containing evaluation results + output_file: Path to save the HTML file + img_path_key: Key name for image path in results + gt_key: Key name for ground truth text in results + pred_key: Key name for predicted text in results + cer_key: Key name for CER value in results + prefix_img_dir: Root directory to prepend to image paths + max_img_width: Maximum width for embedded images in pixels + max_img_height: Maximum height for embedded images in pixels + + Returns: + bool: True if successful, False otherwise + """ + try: + import base64 + from PIL import Image as PILImage + import io + + # HTML template + html_template = """ + + + + Pix2Text Evaluation Results + + + +

+ <body>
+ <h1>Pix2Text Evaluation Results</h1>
+ <table>
+ <tr><th>Image</th><th>Ground Truth</th><th>Prediction</th><th>CER</th></tr>
+ {rows}
+ </table>
+ </body>
+ </html>
+ +""" + + rows_html = "" + total_cer = 0.0 + valid_count = 0 + + for result in results: + # Handle image path + img_path = result.get(img_path_key, '') + if prefix_img_dir and not os.path.isabs(img_path): + img_path = os.path.join(prefix_img_dir, img_path) + + # Process image + img_html = "" + if img_path and os.path.exists(img_path): + try: + with PILImage.open(img_path) as pil_img: + if pil_img.mode in ('RGBA', 'LA', 'P'): + pil_img = pil_img.convert('RGB') + + # Resize if needed + width, height = pil_img.size + ratio = min(max_img_width / width, max_img_height / height, 1.0) + new_width = int(width * ratio) + new_height = int(height * ratio) + + if ratio < 1.0: + pil_img = pil_img.resize((new_width, new_height), PILImage.Resampling.LANCZOS) + + # Convert to base64 + img_bytes = io.BytesIO() + pil_img.save(img_bytes, format='PNG') + img_base64 = base64.b64encode(img_bytes.getvalue()).decode() + + img_html = f'Image' + + except Exception as e: + img_html = f'Error: {os.path.basename(img_path)}' + else: + img_html = f'Not Found: {os.path.basename(img_path) if img_path else "No path"}' + + # Get text values + gt_text = result.get(gt_key, '').replace('\n', '
<br>') + pred_text = result.get(pred_key, '').replace('\n', '<br>
') + cer_value = result.get(cer_key, 0.0) + + # Determine CER class for styling + if cer_value <= 0.1: + cer_class = "cer-good" + elif cer_value <= 0.3: + cer_class = "cer-medium" + else: + cer_class = "cer-bad" + + if cer_value is not None: + total_cer += cer_value + valid_count += 1 + + # Create row HTML + row_html = f""" + {img_html} + {gt_text} + {pred_text} + {cer_value:.4f} + """ + rows_html += row_html + + # Calculate average CER + avg_cer = total_cer / valid_count if valid_count > 0 else 0.0 + + # Add summary row + summary_row = f""" + Average CER + {avg_cer:.4f} + """ + rows_html += summary_row + + # Generate final HTML + final_html = html_template.format( + max_width=max_img_width, + max_height=max_img_height, + rows=rows_html + ) + + # Save HTML file + with open(output_file, 'w', encoding='utf-8') as f: + f.write(final_html) + + print(f"HTML report saved to: {output_file}") + print(f"Average CER: {avg_cer:.4f}") + return True + + except Exception as e: + print(f"Error creating HTML report: {e}") + return False diff --git a/pix2text/vlm_api.py b/pix2text/vlm_api.py new file mode 100644 index 0000000000000000000000000000000000000000..7866548843d7e8a05605019d7dc27f3813aadca7 --- /dev/null +++ b/pix2text/vlm_api.py @@ -0,0 +1,240 @@ +# coding: utf-8 +import io +import requests +import base64 +import logging +from typing import Optional, Union, List +from pathlib import Path + +import numpy as np +from PIL import Image +from litellm import completion, batch_completion + +logger = logging.getLogger(__name__) + + +# Function to encode the image +def encode_image( + image_path: Union[str, Image.Image], + *, + max_image_size: int = 768, + auto_resize: bool = True, +) -> Optional[str]: + """Encodes an image file or URL to base64 string with optional resizing. + + This function can handle three types of inputs: + 1. PIL Image object + 2. URL string (starting with http:// or https://) + 3. Local file path + + The function optionally resizes images that exceed the specified maximum dimension + while maintaining the aspect ratio. + + Args: + image_path (Union[str, PIL.Image.Image]): Path to image file, URL, or PIL Image object + max_image_size (int, optional): Maximum dimension for image resize. Defaults to 768. + auto_resize (bool, optional): Whether to automatically resize large images. Defaults to True. + + Returns: + str: Base64 encoded string of the image data. + Returns None if URL fetching fails. 
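+ Note: only the raw base64 payload is returned; callers prepend the "data:image/...;base64," prefix themselves.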
+ + Example: + >>> encoded = encode_image("path/to/image.jpg") + >>> encoded = encode_image("https://example.com/image.jpg") + >>> encoded = encode_image(pil_image_object) + """ + if isinstance(image_path, Image.Image): + img = image_path.convert("RGB") + buffered = io.BytesIO() + img.save(buffered, format="PNG") + image_data = buffered.getvalue() + elif image_path.startswith(("http://", "https://")): + response = requests.get(image_path) + try: + response.raise_for_status() # Raise an error for bad responses + except requests.exceptions.HTTPError as e: + print(f"Failed to fetch image: {e}") + return None + image_data = response.content + else: + with open(image_path, "rb") as image_file: + image_data = image_file.read() + if not auto_resize: + return base64.b64encode(image_data).decode("utf-8") + + # 对分辨率太高的图片, 把其短边压缩到 max_image_size + image = Image.open(io.BytesIO(image_data)) + width, height = image.size + if width > max_image_size and height > max_image_size: + if width > height: + new_height = max_image_size + new_width = int(width * max_image_size / height) + else: + new_height = int(height * max_image_size / width) + new_width = max_image_size + image = image.resize((new_width, new_height)) + # image.save("out-resize.png") + # Convert image to bytes + buffered = io.BytesIO() + image.save(buffered, format="PNG") + return base64.b64encode(buffered.getvalue()).decode("utf-8") + + +def parse_content(content) -> dict: + """Parse the content from the API response. + Example: + convert '## text_language\nzh\n## text_content\n```\n## 写作。Writing. (50 points)\n写一写自己的一个爱好。\n```' to a structured dictionary: + { + "language": "zh", + "content": "## 写作。Writing. (50 points)\n写一写自己的一个爱好。\n```" + } + + Args: + content (str): The content string to parse. + + Returns: + dict: The parsed content, with keys "language" and "text". + - language (str): The language of the content. + - text (str): The text content, which may include Markdown formatting. + """ + if not isinstance(content, str): + raise ValueError("Content must be a string") + splits = content.split("## text_content") + if len(splits) != 2: + raise ValueError("Content format is incorrect") + parsed_str = splits[1].strip() + # 去掉 开头的 ```.*\n 和结尾的 ``` + if parsed_str.startswith("```"): + parsed_str = parsed_str[parsed_str.index("\n") + 1 :] + if parsed_str.endswith("```"): + parsed_str = parsed_str[: parsed_str.rindex("```")].strip() + lang_splits = splits[0].split("## text_language") + if len(lang_splits) != 2: + raise ValueError("Language format is incorrect") + lang = lang_splits[1].strip() + + return { + "language": lang, + "text": parsed_str, + } + +PROMPT = """ +首先识别图片中的文字是什么语言,然后再把图片中的文字或数学公式转换成Markdown格式表示, 数学公式使用tex表示。 +注意: +- 不要出现任何多余的文字 +- 行内内嵌公式使用`$...$`包裹 +- 独立行公式使用`$$\n...\n$$`包裹 +- 表格中的每行开头和结尾都要有| +- 段落标题前面使用合适数量的 # +输出格式示例: +## text_language +zh +## text_content +``` +这是文字。内嵌公式:$x^2$。独立行公式: +$$ +x^2 + y^2 = z^2 +$$ +``` +""" + + +class Vlm(object): + """ + VLM API for image-to-text conversion. + This class uses the Litellm library to interact with the VLM API. + """ + + def __init__(self, *, model_name: str, api_key: str) -> None: + self.model_name = model_name + self.api_key = api_key + + def __call__( + self, + img_path: Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]] = None, + *, + prompt: str = PROMPT, + resized_shape: int = 768, + auto_resize: bool = True, + parsing_func: Optional[callable] = parse_content, + **kwargs, + ) -> Union[dict, List[dict]]: + """Call the VLM API to convert image to text. 
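+ Accepts either a single image or a list of images; requests are issued via litellm's batch_completion, and a failed call yields empty-text results with score 0.0.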
+ Args: + img_path (Union[str, Path, Image.Image, List[str], List[Path], List[Image.Image]]): Path to the image file or files. + prompt (str): Prompt for the API. + auto_resize (bool): Whether to automatically resize large images. + **kwargs: Additional arguments for the API call. + Returns: + dict or List[dict]: A dictionary for single image and list of dicts for multiple images. Each dict contains the text extracted from the image and the score: + - text: Extracted text from the image. + - score: Probability score of the extracted text. + """ + single_image = False if isinstance(img_path, (list, tuple)) else True + img_paths = [img_path] if single_image else img_path + messages = [] + for img_path in img_paths: + if isinstance(img_path, Path): + img_path = str(img_path) + if not isinstance(img_path, (str, Image.Image)): + raise ValueError("img_path must be a string or PIL Image object") + base64_image = encode_image(img_path, max_image_size=resized_shape, auto_resize=auto_resize) + content = [ + {"type": "text", "text": prompt}, + { + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}, + }, + ] + messages.append( + [ + {"role": "user", "content": content}, + ] + ) + + try: + responses = batch_completion( + model=self.model_name, + messages=messages, + api_key=self.api_key, + **kwargs, + ) + + results = [] + for response in responses: + # Extract the response content + out = response.get("choices", [{}])[0].get("message", {}).get("content") + logprob = response.get("choices", [{}])[0].get("logprobs") + # to probability + prob = float(np.exp(logprob)) if logprob else 0.0 + if parsing_func is None: + one_res = { + "text": out, + "score": prob, + } + else: + try: + one_res = parsing_func(out) + one_res["score"] = prob + except Exception as exc: + logger.error("An error occurred while parsing the content: %s", exc) + one_res = { + "text": out, + "score": prob, + } + results.append(one_res) + except Exception as exc: + logger.error("An error occurred: %s", exc) + results = [{ + "text": "", + "score": 0.0, + } for _ in img_paths] + + return results[0] if single_image else results + + def __repr__(self): + return f"Vlm(model_name={self.model_name})" + + def __str__(self): + return f"Vlm(model_name={self.model_name})" diff --git a/pix2text/vlm_table_ocr.py b/pix2text/vlm_table_ocr.py new file mode 100644 index 0000000000000000000000000000000000000000..2088eed78d74f5765afc01ff6a1196c10ff9ca02 --- /dev/null +++ b/pix2text/vlm_table_ocr.py @@ -0,0 +1,218 @@ +# coding: utf-8 +# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix. +# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com). + +import os +from typing import Union, Optional, Dict, Any, List +from copy import deepcopy +from pathlib import Path + +from PIL import Image +import numpy as np + +from .utils import read_img + + + +# Default VLM prompt for table recognition +TABLE_PROMPT = """ +首先识别图片中的文字是什么语言,然后再把图片中的表格转换成Markdown格式表示, 数学公式使用tex表示。 +注意: +- 不要出现任何多余的文字 +- 行内内嵌公式使用$符号包裹 +- 独立行公式使用$$符号包裹 +- 表格中的每行开头和结尾都要有| +输出格式示例: +## text_language +en +## text_content +``` +|---|---| +| 1 | line1 | +| 2 | square: $a^2$ | +| 3 | $$r^2$$ | +``` +) +""" + + +class VlmTableOCR(object): + """ + Implements table extraction using Vision Language Models. + This class uses the same interface as TableOCR but leverages VLM capabilities. + """ + + def __init__( + self, + vlm=None, + **kwargs, + ): + """ + Initialize a VlmTableOCR object. 
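+ A ValueError is raised when no vlm instance is provided.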
+ + Args: + vlm: Vision Language Model instance for table recognition + **kwargs: Additional parameters + """ + if vlm is None: + raise ValueError("vlm must be provided") + + self.vlm = vlm + + @classmethod + def from_config( + cls, + configs: Optional[dict] = None, + **kwargs, + ): + """ + Create a VlmTableOCR instance from configuration. + + Args: + vlm: Vision Language Model instance + configs (Optional[dict], optional): Configuration dictionary + **kwargs: Additional parameters + + Returns: + VlmTableOCR: An instance of VlmTableOCR + """ + from .vlm_api import Vlm + + # Combine configs with any additional kwargs + all_kwargs = kwargs.copy() + if configs: + all_kwargs.update(configs) + + vlm = Vlm( + model_name=all_kwargs.pop("model_name", None), + api_key=all_kwargs.pop("api_key", None), + ) + + return cls( + vlm=vlm, + **all_kwargs + ) + + def recognize( + self, + img: Union[str, Path, Image.Image], + *, + prompt: Optional[str] = TABLE_PROMPT, + out_objects=False, + out_cells=False, + out_html=False, + out_csv=False, + out_markdown=True, + **kwargs, + ) -> Dict[str, Any]: + """ + Recognize tables from an image using VLM. + + Args: + img: Input image (path, PIL.Image) + prompt (Optional[str]): Custom prompt for VLM + out_objects (bool): Whether to output objects + out_cells (bool): Whether to output cells + out_html (bool): Whether to output HTML + out_csv (bool): Whether to output CSV + out_markdown (bool): Whether to output Markdown + **kwargs: Additional parameters + * resized_shape (int): Resize shape for large images + * save_analysis_res (str): Save the parsed result image in this file + + Returns: + Dict[str, Any]: Dictionary containing recognized table data in requested formats + """ + out_formats = {} + + if not (out_objects or out_cells or out_html or out_csv or out_markdown): + print("No output format specified") + return out_formats + + if not isinstance(img, (str, Path, Image.Image)): + raise ValueError("img must be a path or PIL.Image") + + # Process with VLM + try: + vlm_result = self.vlm( + img_path=img, + prompt=prompt, + auto_resize=True, + resized_shape=kwargs.get("resized_shape", 768), + **kwargs, + ) + + markdown_text = vlm_result.get("text", "") + + # For markdown output + if out_markdown: + out_formats["markdown"] = [markdown_text] + + # For HTML output (convert from markdown if needed) + if out_html: + try: + import markdown + + html_text = markdown.markdown(markdown_text, extensions=["tables"]) + # Extract just the table HTML + if "" in html_text: + table_html = html_text[ + html_text.find("
") : html_text.rfind("
") + 8 + ] + out_formats["html"] = [table_html] + else: + out_formats["html"] = [ + "
<p>Failed to convert to HTML</p>
" + ] + except ImportError: + out_formats["html"] = [ + "
<p>Markdown conversion library not available</p>
" + ] + + # For CSV output (convert from markdown if needed) + if out_csv: + try: + import pandas as pd + import io + + # Simple markdown table to CSV conversion + lines = [ + line.strip() + for line in markdown_text.split("\n") + if line.strip() + ] + cleaned_lines = [] + + for line in lines: + if line.startswith("|") and line.endswith("|"): + # Remove the first and last | and split by | + cells = [cell.strip() for cell in line[1:-1].split("|")] + cleaned_lines.append(",".join(cells)) + + if cleaned_lines and "---" in cleaned_lines[1]: + # Remove the separator line (---|---|---) + cleaned_lines.pop(1) + + csv_content = "\n".join(cleaned_lines) + out_formats["csv"] = [csv_content] + except Exception as e: + out_formats["csv"] = [f"Error converting to CSV: {str(e)}"] + + # For cellular representation (simplified for VLM) + if out_cells: + raise NotImplementedError( + "Cellular representation is not implemented for VLMTableOCR." + ) + + # For objects (simplified for VLM) + if out_objects: + raise NotImplementedError( + "Object representation is not implemented for VLMTableOCR." + ) + + except Exception as e: + print(f"Error recognizing table: {e}") + if out_markdown: + out_formats["markdown"] = ["Error processing table with VLM"] + + return out_formats diff --git a/pix2text_v1_0.ipynb b/pix2text_v1_0.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..04e4186b9e8c07d718b465edd70921b96581e546 --- /dev/null +++ b/pix2text_v1_0.ipynb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4640eaa9cf4d52cac4c6d7963983957b7562748845e680b07772e2c9959ec42d +size 1298689 diff --git a/pix2text_v1_1.ipynb b/pix2text_v1_1.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1cd069cae0ad723fd26ab54f63f1575ddbb040a6 --- /dev/null +++ b/pix2text_v1_1.ipynb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:647c0f0b58ff4314f0aaa741315bf66e06677889205eff173e108436c1c7f03a +size 1235746 diff --git a/readme.txt b/readme.txt new file mode 100644 index 0000000000000000000000000000000000000000..597f90681b1506023654aded755abfd9b10f246a --- /dev/null +++ b/readme.txt @@ -0,0 +1 @@ +https://github.com/breezedeus/Pix2Text \ No newline at end of file diff --git a/requirements.in b/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..147ad533718f0fbd9de7aa4cf169e2053861f65d --- /dev/null +++ b/requirements.in @@ -0,0 +1,20 @@ +--index-url https://mirrors.aliyun.com/pypi/simple +--extra-index-url https://pypi.tuna.tsinghua.edu.cn/simple +--extra-index-url https://pypi.org/simple + +click +tqdm +numpy +torch +torchvision +pillow>=5.3.0 +opencv-python +cnstd>=1.2.6.1 +cnocr[ort-cpu]>=2.3.2.1 +transformers>=4.37.0 +optimum[onnxruntime] +easyocr +pymupdf>=1.20.0 +pyspellchecker +doclayout-yolo<0.1 +albumentations==1.3.1 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..124f428724853c826d0fe5791c7d47690910f459 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,452 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --output-file=requirements.txt requirements.in +# +--index-url https://mirrors.aliyun.com/pypi/simple +--extra-index-url https://pypi.tuna.tsinghua.edu.cn/simple +--extra-index-url https://pypi.org/simple + +aiohttp==3.9.3 + # via + # datasets + # fsspec +aiosignal==1.3.1 + # via aiohttp +albucore==0.0.20 + # via albumentations 
+albumentations==1.4.21 + # via doclayout-yolo +annotated-types==0.7.0 + # via pydantic +antlr4-python3-runtime==4.9.3 + # via omegaconf +appdirs==1.4.4 + # via wandb +async-timeout==4.0.3 + # via aiohttp +attrs==23.2.0 + # via aiohttp +certifi==2024.2.2 + # via + # requests + # sentry-sdk +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via + # -r requirements.in + # cnocr + # cnstd + # wandb +cnocr[ort-cpu]==2.3.2.1 + # via -r requirements.in +cnstd==1.2.6.1 + # via + # -r requirements.in + # cnocr +coloredlogs==15.0.1 + # via + # onnxruntime + # optimum +colorlog==6.9.0 + # via rapidocr +contourpy==1.2.0 + # via matplotlib +cycler==0.12.1 + # via matplotlib +datasets==2.17.0 + # via + # evaluate + # optimum +dill==0.3.8 + # via + # datasets + # evaluate + # multiprocess +docker-pycreds==0.4.0 + # via wandb +doclayout-yolo==0.0.4 + # via -r requirements.in +easyocr==1.7.1 + # via -r requirements.in +eval-type-backport==0.2.0 + # via albumentations +evaluate==0.4.1 + # via optimum +filelock==3.13.1 + # via + # datasets + # huggingface-hub + # torch + # transformers +flatbuffers==23.5.26 + # via onnxruntime +fonttools==4.49.0 + # via matplotlib +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +fsspec[http]==2023.10.0 + # via + # datasets + # evaluate + # huggingface-hub + # pytorch-lightning + # torch +gitdb==4.0.11 + # via gitpython +gitpython==3.1.42 + # via wandb +huggingface-hub==0.20.3 + # via + # cnstd + # datasets + # evaluate + # optimum + # tokenizers + # transformers +humanfriendly==10.0 + # via coloredlogs +idna==3.6 + # via + # requests + # yarl +imageio==2.34.0 + # via scikit-image +jinja2==3.1.3 + # via torch +kiwisolver==1.4.5 + # via matplotlib +lazy-loader==0.3 + # via scikit-image +lightning-utilities==0.10.1 + # via + # pytorch-lightning + # torchmetrics +markupsafe==2.1.5 + # via jinja2 +matplotlib==3.8.3 + # via + # cnstd + # doclayout-yolo + # seaborn + # ultralytics +mpmath==1.3.0 + # via sympy +multidict==6.0.5 + # via + # aiohttp + # yarl +multiprocess==0.70.16 + # via + # datasets + # evaluate +networkx==3.2.1 + # via + # scikit-image + # torch +ninja==1.11.1.1 + # via easyocr +numpy==1.26.4 + # via + # -r requirements.in + # albucore + # albumentations + # cnocr + # cnstd + # contourpy + # datasets + # easyocr + # evaluate + # imageio + # matplotlib + # onnx + # onnxruntime + # opencv-python + # opencv-python-headless + # optimum + # pandas + # pyarrow + # pytorch-lightning + # rapidocr + # scikit-image + # scipy + # seaborn + # shapely + # tifffile + # torchmetrics + # torchvision + # transformers + # ultralytics-thop +omegaconf==2.3.0 + # via rapidocr +onnx==1.15.0 + # via + # cnocr + # cnstd + # optimum +onnxruntime==1.17.0 + # via + # cnocr + # optimum +opencv-python==4.9.0.80 + # via + # -r requirements.in + # cnstd + # doclayout-yolo + # rapidocr + # ultralytics +opencv-python-headless==4.9.0.80 + # via + # albucore + # albumentations + # easyocr +optimum[onnxruntime]==1.16.2 + # via -r requirements.in +packaging==23.2 + # via + # datasets + # evaluate + # huggingface-hub + # lightning-utilities + # matplotlib + # onnxruntime + # optimum + # pytorch-lightning + # scikit-image + # torchmetrics + # transformers +pandas==2.2.0 + # via + # cnstd + # datasets + # doclayout-yolo + # evaluate + # seaborn + # ultralytics +pillow==10.2.0 + # via + # -r requirements.in + # cnocr + # cnstd + # doclayout-yolo + # easyocr + # imageio + # matplotlib + # rapidocr + # scikit-image + # torchvision + # ultralytics +protobuf==4.25.3 + # via + # onnx + # 
onnxruntime + # optimum + # transformers + # wandb +psutil==5.9.8 + # via + # doclayout-yolo + # ultralytics + # wandb +py-cpuinfo==9.0.0 + # via + # doclayout-yolo + # ultralytics +pyarrow==15.0.0 + # via datasets +pyarrow-hotfix==0.6 + # via datasets +pyclipper==1.3.0.post5 + # via + # cnstd + # easyocr + # rapidocr +pydantic==2.9.2 + # via albumentations +pydantic-core==2.23.4 + # via pydantic +pymupdf==1.24.1 + # via -r requirements.in +pymupdfb==1.24.1 + # via pymupdf +pyparsing==3.1.1 + # via matplotlib +pyspellchecker==0.8.1 + # via -r requirements.in +python-bidi==0.4.2 + # via easyocr +python-dateutil==2.8.2 + # via + # matplotlib + # pandas +pytorch-lightning==2.2.0.post0 + # via + # cnocr + # cnstd +pytz==2024.1 + # via pandas +pyyaml==6.0.1 + # via + # albumentations + # cnstd + # datasets + # doclayout-yolo + # easyocr + # huggingface-hub + # omegaconf + # pytorch-lightning + # rapidocr + # transformers + # ultralytics + # wandb +rapidocr==3.2.0 + # via + # cnocr + # cnstd +regex==2023.12.25 + # via transformers +requests==2.31.0 + # via + # datasets + # doclayout-yolo + # evaluate + # fsspec + # huggingface-hub + # rapidocr + # responses + # torchvision + # transformers + # ultralytics + # wandb +responses==0.18.0 + # via evaluate +safetensors==0.4.2 + # via transformers +scikit-image==0.22.0 + # via easyocr +scipy==1.12.0 + # via + # albumentations + # cnstd + # doclayout-yolo + # easyocr + # scikit-image + # ultralytics +seaborn==0.13.2 + # via + # cnstd + # doclayout-yolo + # ultralytics +sentencepiece==0.1.99 + # via transformers +sentry-sdk==1.40.4 + # via wandb +setproctitle==1.3.3 + # via wandb +shapely==2.0.2 + # via + # cnstd + # easyocr + # rapidocr +simsimd==6.0.5 + # via albucore +six==1.16.0 + # via + # docker-pycreds + # python-bidi + # python-dateutil + # rapidocr +smmap==5.0.1 + # via gitdb +stringzilla==3.10.10 + # via albucore +sympy==1.12 + # via + # onnxruntime + # optimum + # torch +thop==0.1.1.post2209072238 + # via doclayout-yolo +tifffile==2024.2.12 + # via scikit-image +tokenizers==0.15.2 + # via transformers +torch==2.2.0 + # via + # -r requirements.in + # cnocr + # cnstd + # doclayout-yolo + # easyocr + # optimum + # pytorch-lightning + # thop + # torchmetrics + # torchvision + # ultralytics + # ultralytics-thop +torchmetrics==1.3.1 + # via + # cnocr + # pytorch-lightning +torchvision==0.17.0 + # via + # -r requirements.in + # cnocr + # cnstd + # doclayout-yolo + # easyocr + # ultralytics +tqdm==4.66.2 + # via + # -r requirements.in + # cnocr + # cnstd + # datasets + # doclayout-yolo + # evaluate + # huggingface-hub + # pytorch-lightning + # rapidocr + # transformers + # ultralytics +transformers[sentencepiece]==4.37.2 + # via + # -r requirements.in + # optimum +typing-extensions==4.9.0 + # via + # huggingface-hub + # lightning-utilities + # pydantic + # pydantic-core + # pytorch-lightning + # torch +tzdata==2024.1 + # via pandas +ultralytics==8.2.32 + # via cnstd +ultralytics-thop==0.2.8 + # via ultralytics +unidecode==1.3.8 + # via cnstd +urllib3==2.2.0 + # via + # requests + # responses + # sentry-sdk +wandb==0.16.3 + # via cnocr +xxhash==3.4.1 + # via + # datasets + # evaluate +yarl==1.9.4 + # via aiohttp + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..57d631c3f004f9f6e24c31388e4df58801b8aba1 --- /dev/null +++ b/scripts/__init__.py @@ -0,0 +1 @@ +# coding: utf-8 diff --git 
a/scripts/screenshot_daemon.py b/scripts/screenshot_daemon.py new file mode 100644 index 0000000000000000000000000000000000000000..913a9846b82f9b0fe1aecbb2f639d515f22702f5 --- /dev/null +++ b/scripts/screenshot_daemon.py @@ -0,0 +1,78 @@ +# coding: utf-8 +# Copyright (C) 2022, [Breezedeus](https://github.com/breezedeus). + +# 安装 pyperclip +# > pip install pyperclip + +import os +import time +import glob + +import pyperclip as pc + +from pix2text import set_logger, Pix2Text, merge_line_texts, render_html + +logger = set_logger(log_level='DEBUG') + +SCREENSHOT_DIR = os.getenv( + "SCREENSHOT_DIR", '/Users/king/Pictures/screenshot_from_xnip' +) + +thresholds = { + 'formula2general': 0.65, # 如果识别为 `formula` 类型,但得分小于此阈值,则改为 `general` 类型 + 'english2general': 0.75, # 如果识别为 `english` 类型,但得分小于此阈值,则改为 `general` 类型 +} +config = dict(analyzer=dict(model_name='mfd-1.5'), thresholds=thresholds) +P2T = Pix2Text.from_config(config) + + +def get_newest_fp_time(screenshot_dir): + fn_list = glob.glob1(screenshot_dir, '*g') + fp_list = [os.path.join(screenshot_dir, fn) for fn in fn_list] + if not fp_list: + return None, None + fp_list.sort(key=lambda fp: os.path.getmtime(fp), reverse=True) + return fp_list[0], os.path.getmtime(fp_list[0]) + + +def recognize(screenshot_dir, delta_interval): + while True: + newest_fp, newest_mod_time = get_newest_fp_time(screenshot_dir) + if ( + newest_mod_time is not None + and time.time() - newest_mod_time < delta_interval + ): + logger.info(f'analyzing screenshot file {newest_fp}') + image_type, result = _recognize_newest(newest_fp) + logger.info('image type: %s, image text: %s', image_type, result) + if result: + pc.copy(result) + # render_html('./analysis_res.jpg', image_type, result, out_html_fp='out-text.html') + time.sleep(1) + + +def _recognize_newest(newest_fp): + res = P2T.recognize( + newest_fp, + use_analyzer=True, + save_analysis_res='./analysis_res.jpg', + embed_sep=(' $$', '$$ '), + isolated_sep=('\n', '\n'), + ) + if len(res) == 1: + return res[0]['type'], res[0]['text'] + elif len(res) > 1: + box_types = set([info['type'] for info in res]) + if len(box_types) > 1: + image_type = 'hybrid' + else: + image_type = list(box_types)[0] + text = merge_line_texts(res, auto_line_break=True) + + return image_type, text + + return 'general', '' + + +if __name__ == '__main__': + recognize(SCREENSHOT_DIR, 1.05) diff --git a/scripts/try_pix2text_mfr.py b/scripts/try_pix2text_mfr.py new file mode 100644 index 0000000000000000000000000000000000000000..f8555ec0bab843d36414713504a519217e4e4db7 --- /dev/null +++ b/scripts/try_pix2text_mfr.py @@ -0,0 +1,86 @@ +# coding: utf-8 +#! 
pip install pillow transformers optimum[onnxruntime] +from PIL import Image +from transformers import TrOCRProcessor +from optimum.onnxruntime import ORTModelForVision2Seq +from transformers import VisionEncoderDecoderModel + +def test_tokenizer_consistency(processor, test_strings=None): + """ + 测试Tokenizer的编码和解码是否一致 + + Args: + processor: TrOCRProcessor实例 + test_strings (list): 要测试的字符串列表 + """ + if test_strings is None: + test_strings = [ + # "Hello, world!", + # "你好,世界!", + # "12345", + # "1 + 1 = 2", + # "The quick brown fox jumps over the lazy dog.", + # "测试一下中文和English混合的情况", + # "\mathcal{L}_{\mathrm{e y e l i d}} \,=\sum_{t=1}^{T} \sum_{v=1}^{V} \mathcal{M}_{v}^{\mathrm{( e y e l i d )}} \left( \left\| \hat{h}_{t, v}-x_{t, v} \right\|^{2} \right)", + "\\hat { N } _ { 3 } = \\sum \\sp f _ { j = 1 } a _ { j } \\sp { \\dagger } a _ { j } .", + ] + + print("\n" + "="*50) + print("Testing Tokenizer Consistency") + print("="*50) + + all_passed = True + for text in test_strings: + # 编码 + encoded = processor.tokenizer.encode_plus(text, return_tensors="pt") + outs = processor.tokenizer( + [text], + padding="max_length", + truncation=True, + max_length=512, + )["input_ids"] + input_ids = encoded["input_ids"][0] + breakpoint() + + # 解码 + decoded = processor.tokenizer.decode(input_ids, skip_special_tokens=True) + + # 比较 + is_match = (text == decoded) + if not is_match: + all_passed = False + + print(f"\nOriginal: {repr(text)}") + print(f"Encoded: {input_ids.tolist()}") + print(f"Decoded: {repr(decoded)}") + print(f"Match: {is_match}") + + print("\n" + "="*50) + if all_passed: + print("✅ All tests passed! Tokenizer encoding and decoding are consistent.") + else: + print("❌ Some tests failed. Tokenizer encoding and decoding are not consistent.") + print("="*50 + "\n") + +model = 'breezedeus/pix2text-mfr' +model = 'models/checkpoint-683356' +processor = TrOCRProcessor.from_pretrained(model) + +# 测试Tokenizer的编码和解码是否一致 +# test_tokenizer_consistency(processor) + +# model = ORTModelForVision2Seq.from_pretrained(model, use_cache=False) + +model = VisionEncoderDecoderModel.from_pretrained(model) + +image_fps = [ + # 'https://github.com/breezedeus/Pix2Text/blob/main/docs/examples/formula.jpg?raw=true', + 'docs/examples/formula.jpg', + # '/Users/king/Documents/WhatIHaveDone/Test/syndoc/output-latex/sqrt_tex/150-cmbright.jpg' + # 'examples/0000186.png', +] +images = [Image.open(fp).convert('RGB') for fp in image_fps] +pixel_values = processor(images=images, return_tensors="pt").pixel_values +generated_ids = model.generate(pixel_values) +generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) +print(f'generated_ids: {generated_ids}, \ngenerated text: {generated_text}') diff --git a/scripts/try_service.py b/scripts/try_service.py new file mode 100644 index 0000000000000000000000000000000000000000..6579113388de3b1b623c2667485dec530136ba25 --- /dev/null +++ b/scripts/try_service.py @@ -0,0 +1,36 @@ +# coding: utf-8 + +import requests + + +def main(): + url = 'http://0.0.0.0:8503/pix2text' + + image_fp = 'docs/examples/page2.png' + # image_fp = 'docs/examples/mixed.jpg' + # image_fp = 'docs/examples/math-formula-42.png' + # image_fp = 'docs/examples/english.jpg' + data = { + "file_type": "page", + "resized_shape": 768, + "embed_sep": " $,$ ", + "isolated_sep": "$$\n, \n$$" + } + files = { + "image": (image_fp, open(image_fp, 'rb'), 'image/jpeg') + } + + r = requests.post(url, data=data, files=files) + + outs = r.json()['results'] + out_md_dir = r.json()['output_dir'] + if 
isinstance(outs, str): + only_text = outs + else: + only_text = '\n'.join([out['text'] for out in outs]) + print(f'{only_text=}') + print(f'{out_md_dir=}') + + +if __name__ == '__main__': + main() diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..b08aa738ec5c0851110f3e9af839ad19d59bcf19 --- /dev/null +++ b/setup.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# coding: utf-8 +# [Pix2Text](https://github.com/breezedeus/pix2text): an Open-Source Alternative to Mathpix. +# Copyright (C) 2022-2024, [Breezedeus](https://www.breezedeus.com). + +import os +from setuptools import find_packages, setup +from pathlib import Path + +PACKAGE_NAME = "pix2text" + +here = Path(__file__).parent + +long_description = (here / "README.md").read_text(encoding="utf-8") + +about = {} +exec( + (here / PACKAGE_NAME.replace('.', os.path.sep) / "__version__.py").read_text( + encoding="utf-8" + ), + about, +) + +required = [ + "click", + "tqdm", + "numpy", + "opencv-python", + "cnocr[ort-cpu]>=2.3.0.2", + "cnstd>=1.2.4.2", + "pillow", + "torch", + "torchvision", + "transformers>=4.37.0", + "optimum[onnxruntime]", + "PyMuPDF", + "pyspellchecker", + "doclayout-yolo<0.1", +] +extras_require = { + "multilingual": ["easyocr"], + "dev": ["pip-tools", "pytest"], + "serve": ["uvicorn[standard]", "fastapi", "python-multipart", "pydantic"], + "vlm": ["litellm"], +} + +entry_points = """ +[console_scripts] +p2t = pix2text.cli:cli +""" + +setup( + name=PACKAGE_NAME, + version=about['__version__'], + description="An Open-Source Python3 tool for recognizing layouts, tables, math formulas, and text in images, converting them into Markdown format. A free alternative to Mathpix, empowering seamless conversion of visual content into text-based representations.", + long_description=long_description, + long_description_content_type="text/markdown", + author='breezedeus', + author_email='breezedeus@163.com', + license='MIT', + url='https://github.com/breezedeus/pix2text', + platforms=["Mac", "Linux", "Windows"], + packages=find_packages(), + include_package_data=True, + # data_files=[('', ['pix2text/doc_xl_layout/map_info.json',],)], + entry_points=entry_points, + install_requires=required, + extras_require=extras_require, + zip_safe=False, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Operating System :: OS Independent', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python', + 'Programming Language :: Python :: Implementation', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + ], +) diff --git a/tests/test_doc_xl_layout_parser.py b/tests/test_doc_xl_layout_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..916459744bb0a32d289d2c1e70443f7853d52600 --- /dev/null +++ b/tests/test_doc_xl_layout_parser.py @@ -0,0 +1,13 @@ +# coding: utf-8 +from pix2text.doc_xl_layout import DocXLayoutParser +from pix2text.utils import set_logger + +logger = set_logger() + + +def test_doc_xl_layout_parser(): + # model_fp = os.path.expanduser('~/.pix2text/1.0/doc_xl_layout/DocXLayout_231012.pth') + img_fp = '/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/page2.png' + layout_parser = DocXLayoutParser(debug=1) + out, column_meta = layout_parser.parse(img_fp) + print(out) 
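+ 
+ 
+ # A minimal variant of the test above (hypothetical, same API), assuming the repo's
+ # bundled example image instead of a machine-specific absolute path:
+ def test_doc_xl_layout_parser_relative_path():
+     img_fp = 'docs/examples/page2.png'
+     layout_parser = DocXLayoutParser(debug=1)
+     out, column_meta = layout_parser.parse(img_fp)
+     print(out)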
diff --git a/tests/test_doclayout_yolo.py b/tests/test_doclayout_yolo.py new file mode 100644 index 0000000000000000000000000000000000000000..c451a63423c656616653a6b7193660b0a2ab8dd5 --- /dev/null +++ b/tests/test_doclayout_yolo.py @@ -0,0 +1,15 @@ +# coding: utf-8 +from doclayout_yolo import YOLOv10 + +# Load the pre-trained model +model = YOLOv10("doclayout_yolo_docstructbench_imgsz1024.pt") +img = 'docs/examples/page-authors-1.png' +# img = 'docs/examples/page.png' +# Perform prediction +det_res = model.predict( + img, # Image to predict + imgsz=1024, # Prediction image size + conf=0.2, # Confidence threshold + device="mps" # Device to use (e.g., 'cuda:0' or 'cpu') +)[0] +print(det_res.boxes) \ No newline at end of file diff --git a/tests/test_latex_ocr.py b/tests/test_latex_ocr.py new file mode 100644 index 0000000000000000000000000000000000000000..92abcde5b9cbd4bb4d86427b82cabcf5ab7ef6cb --- /dev/null +++ b/tests/test_latex_ocr.py @@ -0,0 +1,59 @@ +# coding: utf-8 +import os +import os.path +import time + +from pix2text import set_logger, read_img +from pix2text.latex_ocr import * + +logger = set_logger() + + +def test_download_model(): + latex_ocr = LatexOCR() + + image_fps = [ + 'docs/examples/formula.jpg', + 'docs/examples/math-formula-42.png', + ] + start_time = time.time() + outs = latex_ocr.recognize(image_fps) + logger.info(f'average cost time: {(time.time() - start_time) / len(image_fps):.4f} seconds') + for img, out in zip(image_fps, outs): + logger.info(f'- image: {img}, out: \n\t{out}') + + +def test_infer_with_transformers(): + from PIL import Image + from transformers import TrOCRProcessor + from optimum.onnxruntime import ORTModelForVision2Seq + + model_dir = os.path.expanduser('~/.pix2text/1.1/mfr-1.5-onnx') + processor = TrOCRProcessor.from_pretrained(model_dir) + model = ORTModelForVision2Seq.from_pretrained(model_dir, use_cache=False) + + image_fps = [ + 'docs/examples/formula.jpg', + 'docs/examples/math-formula-42.png', + ] + images = [read_img(fp, return_type='Image') for fp in image_fps] + pixel_values = processor(images=images, return_tensors="pt").pixel_values + # print(f'pixel_values', pixel_values) + generated_ids = model.generate(pixel_values) + generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) + print(f'generated_ids: {generated_ids}, \ngenerated text: {generated_text}') + + +def test_infer(): + more_model_configs = {} + latex_ocr = LatexOCR(more_model_configs=more_model_configs) + + image_fps = [ + 'docs/examples/formula.jpg', + 'docs/examples/math-formula-42.png', + ] + start_time = time.time() + outs = latex_ocr.recognize(image_fps, batch_size=2) + logger.info(f'average cost time: {(time.time() - start_time) / len(image_fps):.4f} seconds') + for img, out in zip(image_fps, outs): + logger.info(f'- image: {img}, out: \n\t{out}') \ No newline at end of file diff --git a/tests/test_mfd.py b/tests/test_mfd.py new file mode 100644 index 0000000000000000000000000000000000000000..44a1e96ac9062cd74f35101ae6dd8697ea058860 --- /dev/null +++ b/tests/test_mfd.py @@ -0,0 +1,20 @@ +# coding: utf-8 + +from pix2text import set_logger +from pix2text import MathFormulaDetector + +logger = set_logger() + + +def test_formula_detector(): + det = MathFormulaDetector() + image_fps = [ + 'docs/examples/mixed.jpg', + 'docs/examples/vietnamese.jpg', + ] + outs = det.detect(image_fps[0]) + print(outs) + outs = det.detect(image_fps) + # outs = det.detect(image_fps, visualize=True, save=True) + print(outs) + diff --git a/tests/test_pix2text.py 
b/tests/test_pix2text.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ca41e8c61e8e0c5490ea5670469790df3a25cf --- /dev/null +++ b/tests/test_pix2text.py @@ -0,0 +1,229 @@ +# coding: utf-8 + +import os + +from pix2text import Pix2Text, set_logger + +set_logger() + + +def test_recognize_pdf(): + pdf_fn = '1804.07821' + img_fp = f'./docs/examples/{pdf_fn}.pdf' + text_formula_config = dict( + languages=('en', 'ch_sim'), + mfd=dict( # 声明 MFD 的初始化参数 + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + text=dict( + rec_model_name='doc-densenet_lite_666-gru_large', + rec_model_backend='onnx', + rec_model_fp=os.path.expanduser( + '~/.cnocr/2.3/doc-densenet_lite_666-gru_large/cnocr-v2.3-doc-densenet_lite_666-gru_large-epoch=005-ft-model.onnx' + # noqa + ), # 注:修改成你的模型文件所存储的路径 + ), + ) + total_config = { + 'layout': {}, + 'text_formula': text_formula_config, + } + p2t = Pix2Text.from_config(total_configs=total_config, enable_formula=True) + out_md = p2t.recognize_pdf( + img_fp, + page_numbers=[0, 7, 8], + table_as_image=True, + save_debug_res=f'./outputs-{pdf_fn}', + ) + out_md.to_markdown('page-output') + # print(out_page) + # out_page.to_markdown('page-output') + + +def test_recognize_page(): + # img_fp = './docs/examples/formula.jpg' + img_fp = './docs/examples/page2.png' + # img_fp = './docs/examples/mixed.jpg' + total_config = { + 'layout': {}, + 'text_formula': { + 'formula': { + 'model_name': 'mfr-1.5', + 'model_backend': 'onnx', + 'more_model_configs': {'provider': 'CPUExecutionProvider'}, + } + }, + } + p2t = Pix2Text.from_config(total_configs=total_config) + out_page = p2t.recognize_page( + img_fp, + page_id='test_page_1', + title_contain_formula=False, + text_contain_formula=True, + save_debug_res='./outputs', + ) + # print(out_page) + out_page.to_markdown('page-output') + + +def test_spell_checker(): + from spellchecker import SpellChecker + + spell = SpellChecker() + + # 找到拼写错误 + misspelled = spell.unknown(["speci-fied"]) + + for word in misspelled: + # Get the one `most likely` answer + print('word:', word, ' ->', spell.correction(word)) + + # Get a list of `likely` options + print('suggestions:', spell.candidates(word)) + + +def test_blog_example(): + img_fp = './docs/examples/mixed.jpg' + + text_formula_config = dict( + mfd=dict( # 声明 MFD 的初始化参数 + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + ) + total_config = { + 'layout': {'scores_thresh': 0.2}, + 'text_formula': text_formula_config, + } + p2t = Pix2Text.from_config(total_configs=total_config) + outs = p2t.recognize_page( + img_fp, + resized_shape=608, + page_id='test_page_2', + save_layout_res='./layout_res-mixed.jpg', + ) # 也可以使用 `p2t(img_fp)` 获得相同的结果 + print(outs) + + +def test_blog_pro_example(): + img_fp = './docs/examples/mixed.jpg' + + text_formula_config = dict( + languages=('en', 'ch_sim'), + mfd=dict( # 声明 MFD 的初始化参数 + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # 注:修改成你的模型文件所存储的路径 + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # 
 + + +def test_spell_checker(): + from spellchecker import SpellChecker + + spell = SpellChecker() + + # find the misspelled words + misspelled = spell.unknown(["speci-fied"]) + + for word in misspelled: + # Get the one `most likely` answer + print('word:', word, ' ->', spell.correction(word)) + + # Get a list of `likely` options + print('suggestions:', spell.candidates(word)) + + +def test_blog_example(): + img_fp = './docs/examples/mixed.jpg' + + text_formula_config = dict( + mfd=dict( # initialization parameters for the MFD model + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # Note: change this to the path where your model file is stored + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # Note: change this to the path where your model file is stored + ), + ) + total_config = { + 'layout': {'scores_thresh': 0.2}, + 'text_formula': text_formula_config, + } + p2t = Pix2Text.from_config(total_configs=total_config) + outs = p2t.recognize_page( + img_fp, + resized_shape=608, + page_id='test_page_2', + save_layout_res='./layout_res-mixed.jpg', + ) # you can also use `p2t(img_fp)` to get the same result + print(outs) + + +def test_blog_pro_example(): + img_fp = './docs/examples/mixed.jpg' + + text_formula_config = dict( + languages=('en', 'ch_sim'), + mfd=dict( # initialization parameters for the MFD model + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # Note: change this to the path where your model file is stored + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # Note: change this to the path where your model file is stored + ), + text=dict( + rec_model_name='doc-densenet_lite_666-gru_large', + rec_model_backend='onnx', + rec_model_fp=os.path.expanduser( + '~/.cnocr/2.3/doc-densenet_lite_666-gru_large/cnocr-v2.3-doc-densenet_lite_666-gru_large-epoch=005-ft-model.onnx' + # noqa + ), # Note: change this to the path where your model file is stored + ), + ) + p2t = Pix2Text.from_config(total_configs={'text_formula': text_formula_config}) + outs = p2t.recognize_page( + img_fp, resized_shape=608, page_id='test_page_3' + ) # you can also use `p2t(img_fp)` to get the same result + print(outs) + + +def test_example_mixed(): + img_fp = './docs/examples/en1.jpg' + p2t = Pix2Text.from_config() + outs = p2t.recognize_page( + img_fp, resized_shape=608, page_id='test_page_4' + ) # you can also use `p2t(img_fp)` to get the same result + print(outs) + + +def test_example_formula(): + img_fp = './docs/examples/math-formula-42.png' + p2t = Pix2Text.from_config() + outs = p2t.recognize_formula(img_fp) + print(outs) + + +def test_example_text(): + img_fp = './docs/examples/general.jpg' + p2t = Pix2Text(enable_formula=False) + outs = p2t.recognize_text(img_fp) + print(outs) + + +def test_vlm_recognize_page(): + import dotenv + dotenv.load_dotenv() + + model_name = os.getenv("GEMINI_MODEL") + api_key = os.getenv("GEMINI_API_KEY") + + # img_fp = './docs/examples/formula.jpg' + # img_fp = './docs/examples/page2.png' + img_fp = './docs/examples/mixed.jpg' + total_config = { + 'layout': None, + 'text_formula': { + "model_type": "VlmTextFormulaOCR", + "model_name": model_name, + "api_key": api_key, + }, + "table": { + "model_type": "VlmTableOCR", + "model_name": model_name, + "api_key": api_key, + }, + } + p2t = Pix2Text.from_config(total_configs=total_config) + tf_out = p2t.recognize_text_formula(img=img_fp, resized_shape=768, return_text=False) + print(tf_out) + # out_page = p2t.recognize_page( + # img_fp, + # page_id='test_page_1', + # title_contain_formula=False, + # text_contain_formula=True, + # save_debug_res='./outputs', + # ) + # print(out_page) + # out_page.to_markdown('page-output') + + +def test_multilingual_ocr(): + img_fp = 'docs/examples/vietnamese.jpg' + img_fp = 'docs/feedbacks/ru.png' + total_config = { + "layout": {}, + "text_formula": {"languages": ("ru",)}, + } + p2t = Pix2Text.from_config(total_configs=total_config) + outs = p2t.recognize( + img_fp, file_type="text_formula", return_text=True, auto_line_break=False + ) + print(outs)
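test_vlm_recognize_page above fails outright when GEMINI_MODEL or GEMINI_API_KEY is absent. A hypothetical guard (not in this diff) would skip the VLM-backed tests instead of failing them:

```python
# Hypothetical helper: skip VLM tests when no Gemini credentials are set.
# `requires_gemini` is an assumption, not an existing project marker.
import os
import pytest

requires_gemini = pytest.mark.skipif(
    not os.getenv("GEMINI_API_KEY"),
    reason="GEMINI_API_KEY is not set",
)


@requires_gemini
def test_vlm_credentials_present():
    assert os.getenv("GEMINI_MODEL")
```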
diff --git a/tests/test_post_processors.py b/tests/test_post_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb700c54d3eab1afd1388c6f080833142ba4b35 --- /dev/null +++ b/tests/test_post_processors.py @@ -0,0 +1,86 @@ +# coding: utf-8 +from pix2text.latex_ocr import * + + +def test_remove_redundant_script(): + latex_strs = [ + ('^ { abc }', 'abc'), + ('^ { { a + b } }', '{ a + b }'), + ('_ { abc }', 'abc'), + ('_ { { a + b } }', '{ a + b }'), + ('\\sum _ { t = 1 } ^ { T }', '\\sum _ { t = 1 } ^ { T }'), + ] + + for ori, res in latex_strs: + assert remove_redundant_script(ori) == res + + +def test_remove_empty_text(): + latex_strs = [ + ( + 'J _ { \\stackrel { \\arraycolsep } { 0 p t } { G } } ^ { }', + 'J _ { \\stackrel { \\arraycolsep } { 0 p t } { G } }', + ), + ('\\hat { }', ''), + ('\\hat { } _ { } : h = 0. 5', ': h = 0. 5'), + ('\\sum _ { t = 1 } ^ { T }', '\\sum _ { t = 1 } ^ { T }'), + ] + + for ori, res in latex_strs: + assert remove_empty_text(ori) == res + + +def test_remove_trailing_whitespace(): + latex_strs = [ + ('abc \\qquad \\qquad \\qquad', 'abc'), + ('abc \\qquad \\quad \\qquad', 'abc'), + ('abc \\qquad \\ \\quad \\qquad', 'abc'), + ('abc \\, \\, \\, \\, \\, \\, \\,', 'abc'), + ('f ^ { \\prime } \\ = \\ \\ ', 'f ^ { \\prime } \\ ='), + ('\\sum _ { t = 1 } ^ { T }', '\\sum _ { t = 1 } ^ { T }'), + ] + + for ori, res in latex_strs: + assert remove_trailing_whitespace(ori) == res + + +def test_remove_unnecessary_spaces(): + latex_strs = [ + ('{ \\cal L }', '{\\cal L}'), # keep the space after a command that is immediately followed by an uppercase letter + ('\\textbf {bold text}', '\\textbf{bold text}'), # remove the space after the command + ('a + b = c', 'a+b=c'), # spaces inside math mode are removed + ('\\frac{ 1 }{ 2 }', '\\frac{1}{2}'), # remove spaces inside braces + ('\\sum_{ i = 1 }^{ N }', '\\sum_{i=1}^{N}'), # remove spaces in subscripts and superscripts + ('\\alpha \\, \\beta', '\\alpha\\, \\beta'), # keep spaces belonging to explicit spacing commands + ('\\sqrt { x } + \\sqrt { y }', '\\sqrt{x}+\\sqrt{y}'), # remove spaces inside braces, keep spaces around operators + ('\\textit {italic text} with space', '\\textit{italic text} with space'), # remove the space after the command, keep spaces within the text + ('\\mathrm { a b c }', '\\mathrm{a b c}'), # remove the space after the command + ('\\sum _ {t=1} ^ {T} 4 _ { 2 }', '\\sum_{t=1}^{T} 4_{2}'), + ('\\sim q ( z | x )', '\\sim q ( z | x )'), + ] + + for ori, res in latex_strs: + assert remove_unnecessary_spaces(ori) == res + + +def test_fix_latex(): + latex_strs = [ + ('\\left \\frac{1}{2}', '\\frac{1}{2}'), + ('\\left ( \\frac{1}{2} + \\left \\frac{1}{2} \\right )', '\\left ( \\frac{1}{2} + \\frac{1}{2} \\right )'), + ('\\left( \\frac{1}{2} + \\left \\frac{1}{2} \\right)', '\\left( \\frac{1}{2} + \\frac{1}{2} \\right)'), + ] + + for ori, res in latex_strs: + assert fix_latex(ori) == res + + +def test_replace_illegal_symbols(): + latex_strs = [ + (r'a^2 \.', r'a^2 \ .'), + (r'a^2 \= \.', r'a^2 \ = \ .'), + (r'a^2 \- \.', r'a^2 \ - \ .'), + (r'a^2 \~ \.', r'a^2 \ ~ \ .'), + ] + + for ori, res in latex_strs: + assert replace_illegal_symbols(ori) == res \ No newline at end of file
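These post-processors are tested one at a time; in practice they would be applied in sequence. The chain below is a sketch only, assuming the helpers are importable by name from pix2text.latex_ocr (the tests above reach them via a star import); the ordering is an assumption, not taken from the library:

```python
# Sketch: one cleanup pass over a raw LaTeX string. The order of application
# is an assumption; all function names come from the tests above.
from pix2text.latex_ocr import (
    fix_latex,
    remove_empty_text,
    remove_redundant_script,
    remove_trailing_whitespace,
    replace_illegal_symbols,
)


def clean_latex(latex: str) -> str:
    for fn in (replace_illegal_symbols, remove_empty_text,
               remove_redundant_script, remove_trailing_whitespace, fix_latex):
        latex = fn(latex)
    return latex


print(clean_latex('\\hat { } _ { } : h = 0. 5'))  # -> ': h = 0. 5' per the cases above
```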
diff --git a/tests/test_sort_boxes.py b/tests/test_sort_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..10c6549a37eaece55477393b2c2d9190ea7c1d1c --- /dev/null +++ b/tests/test_sort_boxes.py @@ -0,0 +1,56 @@ +# coding: utf-8 +import random +from pprint import pprint +import numpy as np + +from pix2text.utils import sort_and_filter_line_boxes, sort_boxes + + +def list2box(xmin, ymin, xmax, ymax): + return np.array([[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]) + + +def test_sort_line_boxes(): + boxes = [ + {'position': list2box(20, 0, 30, 25), 'id': 12}, # anchor; must not be moved + {'position': list2box(0, 5, 20, 20), 'id': 11}, + {'position': list2box(30, 5, 40, 18), 'id': 13}, + {'position': list2box(38, 5, 60, 20), 'id': 14}, + {'position': list2box(21, 20, 40, 30), 'id': 22}, + ] + for box in boxes: + box['__line__'] = -1 # line number this box belongs to; -1 means not yet assigned + boxes[0]['__line__'] = 1 + # random.shuffle(boxes) + outs = sort_and_filter_line_boxes(boxes, 'position') + pprint(outs) + + +def test_sort_boxes(): + boxes = [ + {'position': list2box(0, 5, 20, 20), 'id': 11}, + {'position': list2box(30, 5, 40, 18), 'id': 13}, + {'position': list2box(38, 5, 60, 20), 'id': 14}, + {'position': list2box(20, 0, 30, 25), 'id': 12}, + {'position': list2box(0, 25, 20, 45), 'id': 21}, + {'position': list2box(40, 25, 60, 44), 'id': 23}, + {'position': list2box(21, 20, 40, 30), 'id': 22}, + {'position': list2box(2, 85, 30, 105), 'id': 41}, + {'position': list2box(10, 55, 50, 80), 'id': 31}, + {'position': list2box(35, 83, 58, 103), 'id': 42}, + ] + + line_boxes = sort_boxes(boxes, 'position') + pprint(line_boxes) + + line_ids = [] + for boxes in line_boxes: + line_ids.append([box['id'] for box in boxes]) + + pprint(line_ids) + assert line_ids == [ + [11, 12, 13, 14], + [21, 22, 23], + [31], + [41, 42], + ]
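For reference, a smaller usage sketch of sort_boxes: boxes that share a horizontal band come back grouped into one line, ordered left to right, which is the same semantics test_sort_boxes asserts above:

```python
# Sketch: two boxes on one visual line plus one box on a second line.
# Expected grouping, per the semantics exercised above: [['a1', 'a2'], ['b1']]
import numpy as np
from pix2text.utils import sort_boxes


def box(x0, y0, x1, y1):
    return np.array([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])


items = [
    {'position': box(30, 0, 60, 10), 'id': 'a2'},
    {'position': box(0, 2, 25, 10), 'id': 'a1'},
    {'position': box(0, 20, 25, 30), 'id': 'b1'},
]
print([[b['id'] for b in line] for line in sort_boxes(items, 'position')])
```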
diff --git a/tests/test_table_ocr.py b/tests/test_table_ocr.py new file mode 100644 index 0000000000000000000000000000000000000000..6ba7e5e1da833180edb2feea08dbcab16a6f871a --- /dev/null +++ b/tests/test_table_ocr.py @@ -0,0 +1,44 @@ +# coding: utf-8 +import pytest +import os + +from pix2text.ocr_engine import prepare_ocr_engine +from pix2text.table_ocr import TableOCR + + +def test_recognize(): + image_path = 'docs/examples/table3.jpg' + os.environ['HF_ENDPOINT'] = os.getenv('HF_ENDPOINT', 'https://hf-mirror.com') + languages = ('en', 'ch_sim') + text_ocr = prepare_ocr_engine(languages, {}) + ocr = TableOCR(text_ocr=text_ocr) + result = ocr.recognize( + image_path, + out_csv=True, + out_cells=True, + out_objects=False, + out_html=True, + out_markdown=True, + save_analysis_res='out-table-rec.png', + ) + + print(result) + + +def test_recognize2(): + image_path = 'docs/examples/table3.jpg' + os.environ['HF_ENDPOINT'] = os.getenv('HF_ENDPOINT', 'https://hf-mirror.com') + languages = ('en', 'ch_sim') + text_ocr = prepare_ocr_engine(languages, {}) + ocr = TableOCR.from_config(text_ocr=text_ocr) + result = ocr.recognize( + image_path, + out_csv=True, + out_cells=True, + out_objects=False, + out_html=True, + out_markdown=True, + save_analysis_res='out-table-rec.png', + ) + + print(result) diff --git a/tests/test_text_formula_ocr.py b/tests/test_text_formula_ocr.py new file mode 100644 index 0000000000000000000000000000000000000000..21c12fcb5f4eb7b50f94ec0e737ab9269118003d --- /dev/null +++ b/tests/test_text_formula_ocr.py @@ -0,0 +1,121 @@ +# coding: utf-8 + +import os + +from pix2text import TextFormulaOCR, merge_line_texts + + +def test_mfd(): + config = dict() + model = TextFormulaOCR.from_config(config) + + res = model.recognize( + './docs/examples/zh1.jpg', save_analysis_res='./analysis_res.jpg', + ) + print(res) + + +def test_example(): + # img_fp = './docs/examples/formula.jpg' + img_fp = './docs/examples/mixed.jpg' + formula_config = { + 'model_name': 'mfr-pro', + 'model_backend': 'onnx', + } + p2t = TextFormulaOCR.from_config(total_configs={'formula': formula_config}) + print(p2t.recognize(img_fp)) + # print(p2t.recognize_formula(img_fp)) + # outs = p2t(img_fp, resized_shape=608, save_analysis_res='./analysis_res.jpg') # can also use `p2t.recognize(img_fp)` + # print(outs) + # # To get just the text contents, use: + # only_text = merge_line_texts(outs, auto_line_break=True) + # print(only_text) + + +def test_blog_example(): + img_fp = './docs/examples/mixed.jpg' + + total_config = dict( + mfd=dict( # initialization parameters for the MFD model + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # Note: change this to the path where your model file is stored + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # Note: change this to the path where your model file is stored + ), + ) + p2t = TextFormulaOCR.from_config(total_configs=total_config) + outs = p2t.recognize( + img_fp, resized_shape=608, return_text=False + ) # you can also use `p2t(img_fp)` to get the same result + print(outs) + # if you only need the recognized text and LaTeX, merge all results with the line below + only_text = merge_line_texts(outs, auto_line_break=True) + print(only_text) + + +def test_blog_pro_example(): + img_fp = './docs/examples/mixed.jpg' + + total_config = dict( + languages=('en', 'ch_sim'), + mfd=dict( # initialization parameters for the MFD model + model_path=os.path.expanduser( + '~/.pix2text/1.1/mfd-onnx/mfd-v20240618.onnx' + ), # Note: change this to the path where your model file is stored + ), + formula=dict( + model_name='mfr-pro', + model_backend='onnx', + model_dir=os.path.expanduser( + '~/.pix2text/1.1/mfr-pro-onnx' + ), # Note: change this to the path where your model file is stored + ), + text=dict( + rec_model_name='doc-densenet_lite_666-gru_large', + rec_model_backend='onnx', + rec_model_fp=os.path.expanduser( + '~/.cnocr/2.3/doc-densenet_lite_666-gru_large/cnocr-v2.3-doc-densenet_lite_666-gru_large-epoch=005-ft-model.onnx' + # noqa + ), # Note: change this to the path where your model file is stored + ), + ) + p2t = TextFormulaOCR.from_config(total_configs=total_config) + outs = p2t.recognize( + img_fp, resized_shape=608, return_text=False + ) # you can also use `p2t(img_fp)` to get the same result + print(outs) + # if you only need the recognized text and LaTeX, merge all results with the line below + only_text = merge_line_texts(outs, auto_line_break=True) + print(only_text) + + +def test_example_mixed(): + img_fp = './docs/examples/en1.jpg' + p2t = TextFormulaOCR.from_config() + outs = p2t.recognize( + img_fp, resized_shape=608, return_text=False + ) # you can also use `p2t(img_fp)` to get the same result + print(outs) + # if you only need the recognized text and LaTeX, merge all results with the line below + only_text = merge_line_texts(outs, auto_line_break=True) + print(only_text) + + +def test_example_formula(): + img_fp = './docs/examples/math-formula-42.png' + p2t = TextFormulaOCR.from_config() + outs = p2t.recognize_formula(img_fp) + print(outs) + + +def test_example_text(): + img_fp = './docs/examples/general.jpg' + p2t = TextFormulaOCR() + outs = p2t.recognize_text(img_fp) + print(outs) diff --git a/tests/test_utils_img.py b/tests/test_utils_img.py new file mode 100644 index 0000000000000000000000000000000000000000..ccd14b962fb305b77ce3ee4002a108fc5811389c --- /dev/null +++ b/tests/test_utils_img.py @@ -0,0 +1,172 @@ +# coding: utf-8 +import os +import tempfile +from pathlib import Path + +import numpy as np +import pytest +from PIL import Image + +from pix2text.utils import convert_transparent_to_contrasting + + +def create_rgba_image(width=100, height=100, bg_color=(255, 0, 0, 0), fg_color=(0, 0, 255, 255)): + """Create a test RGBA image with transparent background and some foreground content.""" + # Create a fully transparent image + img = Image.new('RGBA', (width, height), bg_color) + + # Add some non-transparent content in center + pixels = img.load() + for i in range(width//4, 3*width//4): + for j in range(height//4, 3*height//4): + pixels[i, j] = fg_color + + return img + + +def create_la_image(width=100, height=100, bg_value=0, fg_value=255): + """Create a test LA (grayscale with alpha) image.""" + # Create a fully transparent image + img = Image.new('LA', (width, height), (bg_value, 0)) + + # Add some non-transparent content in center + pixels = img.load() + for i in range(width//4, 3*width//4): + for j in range(height//4, 3*height//4): + pixels[i, j] = (fg_value, 255) + + return img + + +def create_p_image_with_transparency(width=100, height=100): + """Create a test palette mode (P) image with transparency.""" + # Start with an RGBA image + rgba = create_rgba_image(width, height) + + # Convert to palette mode with transparency + p_img = rgba.convert('P') + + # Set transparency + p_img.info['transparency'] = 0 + + return p_img + + +def create_rgb_image(width=100, height=100, color=(100, 150, 200)): + """Create a test RGB image.""" + return Image.new('RGB', (width, height), color) + + +def test_convert_rgba_transparent(): + """Test converting an RGBA image with transparency.""" + # Create test 
image + img = create_rgba_image() + + # Apply the function + result = convert_transparent_to_contrasting(img) + + # Verify the result + assert result.mode == 'RGB', "Result should be in RGB mode" + assert result.size == img.size, "Image dimensions should not change" + + # Convert to numpy array to check pixel values + result_array = np.array(result) + + # The background (originally transparent) should now have a contrasting color + # to the blue foreground we set in create_rgba_image + bg_sample = result_array[5, 5] # Sample from corner (background) + fg_sample = result_array[50, 50] # Sample from center (foreground) + + # Make sure background and foreground are different + assert not np.array_equal(bg_sample, fg_sample), "Background should have different color than foreground" + + +def test_convert_la_transparent(): + """Test converting an LA (grayscale with alpha) image with transparency.""" + img = create_la_image() + result = convert_transparent_to_contrasting(img) + + assert result.mode == 'RGB', "Result should be in RGB mode" + assert result.size == img.size, "Image dimensions should not change" + + +def test_convert_p_with_transparency(): + """Test converting a palette image with transparency.""" + img = create_p_image_with_transparency() + result = convert_transparent_to_contrasting(img) + + assert result.mode == 'RGB', "Result should be in RGB mode" + assert result.size == img.size, "Image dimensions should not change" + + +def test_convert_rgb_no_transparency(): + """Test converting an RGB image (no transparency).""" + # For RGB images, we just expect a converted copy + img = create_rgb_image() + result = convert_transparent_to_contrasting(img) + + assert result.mode == 'RGB', "Result should be in RGB mode" + assert result.size == img.size, "Image dimensions should not change" + + # The image should look the same as input (just ensured to be RGB) + img_rgb = img.convert('RGB') + assert np.array_equal(np.array(result), np.array(img_rgb)), "RGB image should not change visually" + + +def test_end_to_end(): + """ + Test the full workflow: create image, save it, read it, convert it, + then check the result matches expectations. 
+ """ + # Create a temporary directory + with tempfile.TemporaryDirectory() as tmp_dir: + # Create and save a test image + test_path = os.path.join(tmp_dir, "test_transparent.png") + img = create_rgba_image() + img.save(test_path) + + # Read the image back and convert it + img_reopened = Image.open(test_path) + result = convert_transparent_to_contrasting(img_reopened) + + # Verify results + assert result.mode == 'RGB', "Result should be in RGB mode" + assert result.size == img.size, "Image dimensions should not change" + + # Save the result for comparison (optional) + result_path = os.path.join(tmp_dir, "test_result.jpg") + result.save(result_path) + + +def test_edge_cases(): + """Test edge cases like extremely small images or unusual color patterns.""" + # Test a 1x1 pixel transparent image + tiny_image = Image.new('RGBA', (1, 1), (255, 0, 0, 0)) + result = convert_transparent_to_contrasting(tiny_image) + assert result.mode == 'RGB', "Result should be in RGB mode" + assert result.size == (1, 1), "Image dimensions should not change" + + # Test a fully transparent image with no content + empty_image = Image.new('RGBA', (50, 50), (0, 0, 0, 0)) + result = convert_transparent_to_contrasting(empty_image) + assert result.mode == 'RGB', "Result should be in RGB mode" + + # Test an image with partially transparent pixels + partial_img = Image.new('RGBA', (50, 50), (0, 0, 0, 0)) + pixels = partial_img.load() + for i in range(50): + for j in range(50): + pixels[i, j] = (255, 0, 0, i % 255) # Varying alpha values + result = convert_transparent_to_contrasting(partial_img) + assert result.mode == 'RGB', "Result should be in RGB mode" + + +if __name__ == "__main__": + # Run tests manually if needed + test_convert_rgba_transparent() + test_convert_la_transparent() + test_convert_p_with_transparency() + test_convert_rgb_no_transparency() + test_end_to_end() + test_edge_cases() + print("All tests passed!") diff --git a/tests/test_vlm.py b/tests/test_vlm.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf7a17e994b534dba02c9e7da63907185b630c3 --- /dev/null +++ b/tests/test_vlm.py @@ -0,0 +1,70 @@ +# coding=utf-8 +import os +import dotenv + +from pix2text import set_logger +from pix2text.vlm_api import Vlm +from pix2text.vlm_table_ocr import VlmTableOCR +from pix2text.text_formula_ocr import VlmTextFormulaOCR + +logger = set_logger() +# Load environment variables from .env file +dotenv.load_dotenv() + + +def init_vlm(): + GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") + GEMINI_MODEL = os.getenv("GEMINI_MODEL") + + return Vlm( + model_name=GEMINI_MODEL, + api_key=GEMINI_API_KEY, + ) + + +def test_vlm_api(): + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/feedbacks/2024-09-09.jpg" + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/table-cn.jpg" # 表格,中文 + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/ch_tra1.jpg" # 繁体中文 + # img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/hw-formula5.jpg" # 手写公式 + img_path = "docs/examples/hw-zh-en.jpg" # 手写文字 + img_path = "docs/examples/hw-zh1.jpg" # 手写文字 + img_path = "docs/examples/hw-zh2.jpg" # 手写文字 + img_path = "docs/examples/hw-zh3.jpg" # 手写文字 + img_path = ["docs/examples/hw-zh1.jpg", "docs/examples/hw-zh3.jpg"] # 手写文字 + + vlm = init_vlm() + result = vlm(img_path, auto_resize=True) + + # Print the result + print(result) + + +def test_vlm_table_ocr(): + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/table-cn.jpg" # 表格,中文 
diff --git a/tests/test_vlm.py b/tests/test_vlm.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf7a17e994b534dba02c9e7da63907185b630c3 --- /dev/null +++ b/tests/test_vlm.py @@ -0,0 +1,70 @@ +# coding=utf-8 +import os +import dotenv + +from pix2text import set_logger +from pix2text.vlm_api import Vlm +from pix2text.vlm_table_ocr import VlmTableOCR +from pix2text.text_formula_ocr import VlmTextFormulaOCR + +logger = set_logger() +# Load environment variables from .env file +dotenv.load_dotenv() + + +def init_vlm(): + GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") + GEMINI_MODEL = os.getenv("GEMINI_MODEL") + + return Vlm( + model_name=GEMINI_MODEL, + api_key=GEMINI_API_KEY, + ) + + +def test_vlm_api(): + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/feedbacks/2024-09-09.jpg" + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/table-cn.jpg" # table, Simplified Chinese + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/ch_tra1.jpg" # Traditional Chinese + # img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/hw-formula5.jpg" # handwritten formula + img_path = "docs/examples/hw-zh-en.jpg" # handwritten text + img_path = "docs/examples/hw-zh1.jpg" # handwritten text + img_path = "docs/examples/hw-zh2.jpg" # handwritten text + img_path = "docs/examples/hw-zh3.jpg" # handwritten text + img_path = ["docs/examples/hw-zh1.jpg", "docs/examples/hw-zh3.jpg"] # handwritten text + + vlm = init_vlm() + result = vlm(img_path, auto_resize=True) + + # Print the result + print(result) + + +def test_vlm_table_ocr(): + img_path = "/Users/king/Documents/WhatIHaveDone/Test/pix2text/docs/examples/table-cn.jpg" # table, Simplified Chinese + + vlm_table_ocr = VlmTableOCR.from_config( + model_name=os.getenv("GEMINI_MODEL"), + api_key=os.getenv("GEMINI_API_KEY"), + ) + result = vlm_table_ocr.recognize(img_path) + + # Print the result + print(result) + + +def test_vlm_text_formula_ocr(): + # img_path = "docs/examples/vietnamese.jpg" + img_path = "docs/examples/mixed.jpg" + img_path = "docs/examples/ch_tra1.jpg" + + vlm_text_formula_ocr = VlmTextFormulaOCR.from_config( + model_name=os.getenv("GEMINI_MODEL"), + api_key=os.getenv("GEMINI_API_KEY"), + enable_spell_checker=False, + ) + result = vlm_text_formula_ocr.recognize(img_path, resized_shape=768, return_text=False) + + # Print the result + print("Recognition result:") + print(result) \ No newline at end of file
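Many of these suites download models or call external APIs on first run, so running them file by file is the practical default. One way to do that programmatically (equivalent to the pytest CLI; the path assumes the repository root as the working directory):

```python
# Sketch: run a single test module quietly and propagate pytest's exit code.
import pytest

raise SystemExit(pytest.main(['tests/test_sort_boxes.py', '-q']))
```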