# nemotron-ocr-v2 / example.py
# Duplicated from nvidia/nemotron-ocr-v2-multilingual (revision b28505d).
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
from nemotron_ocr.inference.pipeline import NemotronOCR
def main(image_path, merge_level, no_visualize, model_dir, lang):
    """Run the Nemotron OCR pipeline on one image and print every detected region.

    Args:
        image_path: Path to the input image.
        merge_level: Granularity for merging OCR output ("word", "sentence",
            or "paragraph").
        no_visualize: When True, the annotated image is not saved.
        model_dir: Optional local checkpoint directory; when given it takes
            precedence over ``lang``.
        lang: Hub checkpoint selector, used only when ``model_dir`` is None.
    """
    # A local checkpoint directory wins over a hub language selection.
    if model_dir is None:
        pipeline = NemotronOCR(lang=lang)
    else:
        pipeline = NemotronOCR(model_dir=model_dir)

    regions = pipeline(image_path, merge_level=merge_level, visualize=not no_visualize)

    print(f"Found {len(regions)} text regions.")
    for region in regions:
        text = region["text"]
        confidence = region["confidence"]
        # One summary line per region: recognized text, score, normalized bbox.
        print(
            f" - Text: '{text}', "
            f"Confidence: {confidence:.2f}, "
            f"Bbox: [left={region['left']:.4f}, upper={region['upper']:.4f}, "
            f"right={region['right']:.4f}, lower={region['lower']:.4f}]"
        )
def _parse_args():
    """Build the CLI for this example script and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(description="Run OCR inference and annotate image.")
    # Positional: the image to run OCR on.
    parser.add_argument("image_path", type=str, help="Path to the input image.")
    parser.add_argument(
        "--merge-level",
        type=str,
        choices=["word", "sentence", "paragraph"],
        default="paragraph",
        help="Merge level for OCR output (word, sentence, paragraph).",
    )
    parser.add_argument("--no-visualize", action="store_true", help="Do not save the annotated image.")
    # Checkpoint selection: a local directory takes precedence over --lang.
    parser.add_argument(
        "--model-dir",
        type=str,
        default=None,
        help="Path to a directory with detector.pth, recognizer.pth, relational.pth, charset.txt. "
        "If omitted, weights are downloaded from Hugging Face (default: v2 multilingual).",
    )
    parser.add_argument(
        "--lang",
        type=str,
        choices=["en", "multi", "v1"],
        default=None,
        help="Hub checkpoint when --model-dir is omitted: en=v2 English, multi=v2 multilingual (default), v1=legacy.",
    )
    return parser.parse_args()


if __name__ == "__main__":
    opts = _parse_args()
    main(
        opts.image_path,
        merge_level=opts.merge_level,
        no_visualize=opts.no_visualize,
        model_dir=opts.model_dir,
        lang=opts.lang,
    )