Commit ·
b11efde
1
Parent(s): 13506fd
Fix LightOnOCR-2 repetition issues and configuration
Browse files- Change max_tokens default from 6144 to 4096 (as recommended by model card)
- Add enable_prefix_caching=False (recommended by model card)
- Add mm_processor_kwargs with cache_gb=0 to disable multimodal processor caching
- Replace deprecated hf_transfer extra with hf-xet package
These changes align with the official vLLM serve recommendations:
vllm serve lightonai/LightOnOCR-2-1B \
--limit-mm-per-prompt '{"image": 1}' \
--mm-processor-cache-gb 0 \
--no-enable-prefix-caching
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
- lighton-ocr2.py +7 -4
lighton-ocr2.py
CHANGED
|
@@ -3,7 +3,8 @@
|
|
| 3 |
# dependencies = [
|
| 4 |
# "datasets>=3.1.0",
|
| 5 |
# "pyarrow>=17.0.0,<18.0.0",
|
| 6 |
-
# "huggingface-hub[hf_transfer]",
|
|
|
|
| 7 |
# "pillow",
|
| 8 |
# "vllm",
|
| 9 |
# "tqdm",
|
|
@@ -277,7 +278,7 @@ def main(
|
|
| 277 |
image_column: str = "image",
|
| 278 |
batch_size: int = 16,
|
| 279 |
max_model_len: int = 8192,
|
| 280 |
-
max_tokens: int = 6144,
|
| 281 |
temperature: float = 0.2,
|
| 282 |
top_p: float = 0.9,
|
| 283 |
gpu_memory_utilization: float = 0.8,
|
|
@@ -339,6 +340,8 @@ def main(
|
|
| 339 |
gpu_memory_utilization=gpu_memory_utilization,
|
| 340 |
limit_mm_per_prompt={"image": 1}, # One image per prompt
|
| 341 |
enforce_eager=False, # Use torch.compile for better performance
|
|
|
|
|
|
|
| 342 |
)
|
| 343 |
|
| 344 |
# LightOnOCR-2 recommended sampling parameters
|
|
@@ -540,8 +543,8 @@ Examples:
|
|
| 540 |
parser.add_argument(
|
| 541 |
"--max-tokens",
|
| 542 |
type=int,
|
| 543 |
-
default=6144,
|
| 544 |
-
help="Maximum tokens to generate (default: 6144)",
|
| 545 |
)
|
| 546 |
parser.add_argument(
|
| 547 |
"--temperature",
|
|
|
|
| 3 |
# dependencies = [
|
| 4 |
# "datasets>=3.1.0",
|
| 5 |
# "pyarrow>=17.0.0,<18.0.0",
|
| 6 |
+
# "huggingface-hub",
|
| 7 |
+
# "hf-xet",
|
| 8 |
# "pillow",
|
| 9 |
# "vllm",
|
| 10 |
# "tqdm",
|
|
|
|
| 278 |
image_column: str = "image",
|
| 279 |
batch_size: int = 16,
|
| 280 |
max_model_len: int = 8192,
|
| 281 |
+
max_tokens: int = 4096,
|
| 282 |
temperature: float = 0.2,
|
| 283 |
top_p: float = 0.9,
|
| 284 |
gpu_memory_utilization: float = 0.8,
|
|
|
|
| 340 |
gpu_memory_utilization=gpu_memory_utilization,
|
| 341 |
limit_mm_per_prompt={"image": 1}, # One image per prompt
|
| 342 |
enforce_eager=False, # Use torch.compile for better performance
|
| 343 |
+
enable_prefix_caching=False, # Recommended by model card
|
| 344 |
+
mm_processor_kwargs={"cache_gb": 0}, # Disable multimodal processor caching
|
| 345 |
)
|
| 346 |
|
| 347 |
# LightOnOCR-2 recommended sampling parameters
|
|
|
|
| 543 |
parser.add_argument(
|
| 544 |
"--max-tokens",
|
| 545 |
type=int,
|
| 546 |
+
default=4096,
|
| 547 |
+
help="Maximum tokens to generate (default: 4096, recommended for arXiv papers)",
|
| 548 |
)
|
| 549 |
parser.add_argument(
|
| 550 |
"--temperature",
|