Commit ·
6c13a40
1
Parent(s): 59f6b3d
Replace fragile git dep with stable transformers>=5.1.0 for GLM-OCR
Browse files

GLM-OCR support landed in transformers v5.1.0. The previous unpinned git
dependency could break at any commit. Also updated docstring to note vLLM
nightly was re-verified on 2026-02-12.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- glm-ocr.py +24 -12
glm-ocr.py
CHANGED
|
@@ -15,7 +15,7 @@
|
|
| 15 |
#
|
| 16 |
# [tool.uv]
|
| 17 |
# prerelease = "allow"
|
| 18 |
-
# override-dependencies = ["transformers
|
| 19 |
# ///
|
| 20 |
|
| 21 |
"""
|
|
@@ -25,7 +25,8 @@ GLM-OCR is a compact 0.9B parameter OCR model achieving 94.62% on OmniDocBench V
|
|
| 25 |
Uses CogViT visual encoder with GLM-0.5B language decoder and Multi-Token Prediction
|
| 26 |
(MTP) loss for fast, accurate document parsing.
|
| 27 |
|
| 28 |
-
NOTE: Requires vLLM nightly wheels
|
|
|
|
| 29 |
First run may take a few minutes to download and install dependencies.
|
| 30 |
|
| 31 |
Features:
|
|
@@ -38,7 +39,7 @@ Features:
|
|
| 38 |
- MIT licensed
|
| 39 |
|
| 40 |
Model: zai-org/GLM-OCR
|
| 41 |
-
vLLM: Requires vLLM nightly build + transformers
|
| 42 |
Performance: 94.62% on OmniDocBench V1.5
|
| 43 |
"""
|
| 44 |
|
|
@@ -140,7 +141,11 @@ def create_dataset_card(
|
|
| 140 |
) -> str:
|
| 141 |
"""Create a dataset card documenting the OCR process."""
|
| 142 |
model_name = model.split("/")[-1]
|
| 143 |
-
task_desc = {
|
|
|
|
|
|
|
|
|
|
|
|
|
| 144 |
|
| 145 |
return f"""---
|
| 146 |
tags:
|
|
@@ -305,10 +310,7 @@ def main(
|
|
| 305 |
)
|
| 306 |
|
| 307 |
try:
|
| 308 |
-
batch_messages = [
|
| 309 |
-
make_ocr_message(img, task=task)
|
| 310 |
-
for img in batch_images
|
| 311 |
-
]
|
| 312 |
|
| 313 |
outputs = llm.chat(batch_messages, sampling_params)
|
| 314 |
|
|
@@ -347,7 +349,11 @@ def main(
|
|
| 347 |
|
| 348 |
def update_inference_info(example):
|
| 349 |
try:
|
| 350 |
-
existing_info =
|
|
|
|
|
|
|
|
|
|
|
|
|
| 351 |
except (json.JSONDecodeError, TypeError):
|
| 352 |
existing_info = []
|
| 353 |
existing_info.append(inference_entry)
|
|
@@ -385,9 +391,13 @@ def main(
|
|
| 385 |
card.push_to_hub(output_dataset, token=HF_TOKEN)
|
| 386 |
|
| 387 |
logger.info("Done! GLM-OCR processing complete.")
|
| 388 |
-
logger.info(
|
|
|
|
|
|
|
| 389 |
logger.info(f"Processing time: {processing_time_str}")
|
| 390 |
-
logger.info(
|
|
|
|
|
|
|
| 391 |
|
| 392 |
|
| 393 |
if __name__ == "__main__":
|
|
@@ -412,7 +422,9 @@ if __name__ == "__main__":
|
|
| 412 |
print("\n5. Running on HF Jobs:")
|
| 413 |
print(" hf jobs uv run --flavor l4x1 \\")
|
| 414 |
print(" -s HF_TOKEN \\")
|
| 415 |
-
print(
|
|
|
|
|
|
|
| 416 |
print(" input-dataset output-dataset --batch-size 16")
|
| 417 |
print("\nFor full help: uv run glm-ocr.py --help")
|
| 418 |
sys.exit(0)
|
|
|
|
| 15 |
#
|
| 16 |
# [tool.uv]
|
| 17 |
# prerelease = "allow"
|
| 18 |
+
# override-dependencies = ["transformers>=5.1.0"]
|
| 19 |
# ///
|
| 20 |
|
| 21 |
"""
|
|
|
|
| 25 |
Uses CogViT visual encoder with GLM-0.5B language decoder and Multi-Token Prediction
|
| 26 |
(MTP) loss for fast, accurate document parsing.
|
| 27 |
|
| 28 |
+
NOTE: Requires vLLM nightly wheels (checked 2026-02-12, still needed) and
|
| 29 |
+
transformers>=5.1.0 (GLM-OCR support landed in stable release).
|
| 30 |
First run may take a few minutes to download and install dependencies.
|
| 31 |
|
| 32 |
Features:
|
|
|
|
| 39 |
- MIT licensed
|
| 40 |
|
| 41 |
Model: zai-org/GLM-OCR
|
| 42 |
+
vLLM: Requires vLLM nightly build + transformers>=5.1.0
|
| 43 |
Performance: 94.62% on OmniDocBench V1.5
|
| 44 |
"""
|
| 45 |
|
|
|
|
| 141 |
) -> str:
|
| 142 |
"""Create a dataset card documenting the OCR process."""
|
| 143 |
model_name = model.split("/")[-1]
|
| 144 |
+
task_desc = {
|
| 145 |
+
"ocr": "text recognition",
|
| 146 |
+
"formula": "formula recognition",
|
| 147 |
+
"table": "table recognition",
|
| 148 |
+
}
|
| 149 |
|
| 150 |
return f"""---
|
| 151 |
tags:
|
|
|
|
| 310 |
)
|
| 311 |
|
| 312 |
try:
|
| 313 |
+
batch_messages = [make_ocr_message(img, task=task) for img in batch_images]
|
|
|
|
|
|
|
|
|
|
| 314 |
|
| 315 |
outputs = llm.chat(batch_messages, sampling_params)
|
| 316 |
|
|
|
|
| 349 |
|
| 350 |
def update_inference_info(example):
|
| 351 |
try:
|
| 352 |
+
existing_info = (
|
| 353 |
+
json.loads(example["inference_info"])
|
| 354 |
+
if example["inference_info"]
|
| 355 |
+
else []
|
| 356 |
+
)
|
| 357 |
except (json.JSONDecodeError, TypeError):
|
| 358 |
existing_info = []
|
| 359 |
existing_info.append(inference_entry)
|
|
|
|
| 391 |
card.push_to_hub(output_dataset, token=HF_TOKEN)
|
| 392 |
|
| 393 |
logger.info("Done! GLM-OCR processing complete.")
|
| 394 |
+
logger.info(
|
| 395 |
+
f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
|
| 396 |
+
)
|
| 397 |
logger.info(f"Processing time: {processing_time_str}")
|
| 398 |
+
logger.info(
|
| 399 |
+
f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec"
|
| 400 |
+
)
|
| 401 |
|
| 402 |
|
| 403 |
if __name__ == "__main__":
|
|
|
|
| 422 |
print("\n5. Running on HF Jobs:")
|
| 423 |
print(" hf jobs uv run --flavor l4x1 \\")
|
| 424 |
print(" -s HF_TOKEN \\")
|
| 425 |
+
print(
|
| 426 |
+
" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/glm-ocr.py \\"
|
| 427 |
+
)
|
| 428 |
print(" input-dataset output-dataset --batch-size 16")
|
| 429 |
print("\nFor full help: uv run glm-ocr.py --help")
|
| 430 |
sys.exit(0)
|