davanstrien (HF Staff) committed
Commit db91874 · 1 Parent(s): 95f8934

Enhance LightOnOCR fine-tuning script with streaming support and memory optimizations
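This commit adds optional dataset streaming to the fine-tuning script. A new --streaming flag loads the train split as an iterable dataset, shuffles it through a bounded buffer (--shuffle-buffer-size, default 10000), and optionally caps it with --max-train-samples; the validation and test splits are still materialized in memory so indexed evaluation keeps working. Because a streamed dataset has no known length, TrainingArguments is now built with max_steps: an explicit --max-steps takes precedence, otherwise the script derives it from --max-train-samples, falling back to 1000 steps per epoch with a warning. A minimal sketch of that auto-calculation, using the docstring example's values (gradient accumulation of 4 and 3 epochs are assumptions for illustration, not taken from the diff):

    # 5000 samples and batch size 8 come from the docstring example;
    # grad_accum=4 and epochs=3 are assumed values, not from the diff.
    batch_size, grad_accum, epochs = 8, 4, 3
    max_train_samples = 5000
    steps_per_epoch = max_train_samples // (batch_size * grad_accum)  # 156
    max_steps = steps_per_epoch * epochs                              # 468

evaluate_model gains an is_streaming path that batches by iterating the dataset rather than indexing into it, and the unused json, PIL.Image, DatasetDict, and HfApi imports are dropped.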

Files changed (1)
  1. lightonocr-finetune.py +279 -89
lightonocr-finetune.py CHANGED
@@ -34,6 +34,15 @@ Examples:
         --output-dir ./lightonocr-docs \
         --freeze-language \
         --batch-size 8
+
+    # Stream large datasets to reduce memory usage
+    uv run lightonocr-finetune.py \
+        --dataset-id HuggingFaceM4/FineVision \
+        --subset olmOCR-mix-0225-books \
+        --output-dir ./lightonocr-books \
+        --streaming \
+        --shuffle-buffer-size 10000 \
+        --max-train-samples 5000  # Will auto-calculate max-steps
 
     # Push to Hub with evaluation metrics
     uv run lightonocr-finetune.py \
@@ -43,28 +52,27 @@ Examples:
         --push-to-hub \
         --eval-samples 100
 
-    # Run on HF Jobs with GPU
+    # Run on HF Jobs with GPU and streaming
     hf jobs run --gpu l4x1 \
         uv run lightonocr-finetune.py \
-            --dataset-id custom/ocr-dataset \
+            --dataset-id custom/large-ocr-dataset \
            --output-dir ./custom-ocr \
+            --streaming \
            --epochs 3
 """
 
 import argparse
-import json
 import logging
 import os
 import sys
 from datetime import datetime
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, Optional
 
 import torch
-from datasets import load_dataset, DatasetDict
-from huggingface_hub import HfApi, login
+from datasets import load_dataset
+from huggingface_hub import login
 from jiwer import cer, wer
-from PIL import Image
 from tqdm import tqdm
 from transformers import (
     AutoProcessor,
@@ -78,6 +86,8 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
+os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"
+
 # Constants for the assistant pattern in chat template
 ASSISTANT_START_PATTERN = [151645, 1699, 151644, 77091, 1699]
 DEFAULT_MAX_LENGTH = 1024
@@ -128,7 +138,7 @@ class OCRDataCollator:
             ]
             batch_messages.append(messages)
 
-        if len(batch_images) == 0:
+        if not batch_images:
             return None
 
         # Apply chat template
@@ -202,10 +212,21 @@ def evaluate_model(
     batch_size: int = 8,
     device: str = "cuda",
     description: str = "Model",
+    is_streaming: bool = False,
 ) -> Dict[str, float]:
     """
     Evaluate model on dataset and compute OCR metrics.
 
+    Args:
+        model: The model to evaluate
+        processor: The processor for the model
+        dataset: Dataset to evaluate on (can be streaming or regular)
+        num_samples: Number of samples to evaluate
+        batch_size: Batch size for evaluation
+        device: Device to run evaluation on
+        description: Description for logging
+        is_streaming: Whether the dataset is a streaming dataset
+
     Returns:
         Dictionary with CER, WER, and perfect match count
     """
@@ -215,47 +236,102 @@ def evaluate_model(
 
     logger.info(f"Evaluating {description} on {num_samples} samples...")
 
-    # Process in batches
-    for start_idx in tqdm(range(0, min(num_samples, len(dataset)), batch_size)):
-        end_idx = min(start_idx + batch_size, num_samples, len(dataset))
-        batch_samples = [dataset[i] for i in range(start_idx, end_idx)]
-
-        batch_images = [[s["images"][0]] for s in batch_samples]
-        batch_ground_truths = [
-            s["texts"][0]["assistant"].strip() for s in batch_samples
-        ]
-
-        # Prepare inputs
-        messages = [{"role": "user", "content": [{"type": "image"}]}]
-        text = processor.apply_chat_template(
-            messages, tokenize=False, add_generation_prompt=True
-        )
-        texts = [text] * len(batch_images)
-
-        inputs = processor(
-            text=texts,
-            images=batch_images,
-            return_tensors="pt",
-            padding=True,
-            truncation=True,
-            max_length=DEFAULT_MAX_LENGTH,
-            size={"longest_edge": DEFAULT_LONGEST_EDGE},
-        ).to(device)
-        inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)
-
-        # Generate predictions
-        with torch.no_grad():
-            outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True)
-
-        input_length = inputs["input_ids"].shape[1]
-        generated_ids = outputs[:, input_length:]
-        batch_predictions = processor.batch_decode(
-            generated_ids, skip_special_tokens=True
-        )
-        batch_predictions = [p.strip() for p in batch_predictions]
-
-        predictions.extend(batch_predictions)
-        ground_truths.extend(batch_ground_truths)
+    # Handle streaming datasets differently
+    if is_streaming:
+        # For streaming datasets, we take the first num_samples
+        samples_processed = 0
+        batch_samples = []
+
+        for sample in tqdm(dataset, total=num_samples, desc="Evaluating"):
+            if samples_processed >= num_samples:
+                break
+
+            batch_samples.append(sample)
+            samples_processed += 1
+
+            # Process when we have a full batch or reached the end
+            if len(batch_samples) == batch_size or samples_processed == num_samples:
+                batch_images = [[s["images"][0]] for s in batch_samples]
+                batch_ground_truths = [
+                    s["texts"][0]["assistant"].strip() for s in batch_samples
+                ]
+
+                # Prepare inputs
+                messages = [{"role": "user", "content": [{"type": "image"}]}]
+                text = processor.apply_chat_template(
+                    messages, tokenize=False, add_generation_prompt=True
+                )
+                texts = [text] * len(batch_images)
+
+                inputs = processor(
+                    text=texts,
+                    images=batch_images,
+                    return_tensors="pt",
+                    padding=True,
+                    truncation=True,
+                    max_length=DEFAULT_MAX_LENGTH,
+                    size={"longest_edge": DEFAULT_LONGEST_EDGE},
+                ).to(device)
+                inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)
+
+                # Generate predictions
+                with torch.no_grad():
+                    outputs = model.generate(
+                        **inputs, max_new_tokens=512, do_sample=True
+                    )
+
+                input_length = inputs["input_ids"].shape[1]
+                generated_ids = outputs[:, input_length:]
+                batch_predictions = processor.batch_decode(
+                    generated_ids, skip_special_tokens=True
+                )
+                batch_predictions = [p.strip() for p in batch_predictions]
+
+                predictions.extend(batch_predictions)
+                ground_truths.extend(batch_ground_truths)
+                batch_samples = []
+    else:
+        # Original non-streaming evaluation
+        for start_idx in tqdm(range(0, min(num_samples, len(dataset)), batch_size)):
+            end_idx = min(start_idx + batch_size, num_samples, len(dataset))
+            batch_samples = [dataset[i] for i in range(start_idx, end_idx)]
+
+            batch_images = [[s["images"][0]] for s in batch_samples]
+            batch_ground_truths = [
+                s["texts"][0]["assistant"].strip() for s in batch_samples
+            ]
+
+            # Prepare inputs
+            messages = [{"role": "user", "content": [{"type": "image"}]}]
+            text = processor.apply_chat_template(
+                messages, tokenize=False, add_generation_prompt=True
+            )
+            texts = [text] * len(batch_images)
+
+            inputs = processor(
+                text=texts,
+                images=batch_images,
+                return_tensors="pt",
+                padding=True,
+                truncation=True,
+                max_length=DEFAULT_MAX_LENGTH,
+                size={"longest_edge": DEFAULT_LONGEST_EDGE},
+            ).to(device)
+            inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)
+
+            # Generate predictions
+            with torch.no_grad():
+                outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True)
+
+            input_length = inputs["input_ids"].shape[1]
+            generated_ids = outputs[:, input_length:]
+            batch_predictions = processor.batch_decode(
+                generated_ids, skip_special_tokens=True
+            )
+            batch_predictions = [p.strip() for p in batch_predictions]
+
+            predictions.extend(batch_predictions)
+            ground_truths.extend(batch_ground_truths)
 
     # Compute metrics
     cer_score = cer(ground_truths, predictions) * 100
@@ -264,8 +340,9 @@ def evaluate_model(
         1 for pred, gt in zip(predictions, ground_truths) if pred == gt
     )
 
+    actual_samples = len(predictions)
     logger.info(
-        f"CER: {cer_score:.2f}% | WER: {wer_score:.2f}% | Perfect: {perfect_matches}/{num_samples}"
+        f"CER: {cer_score:.2f}% | WER: {wer_score:.2f}% | Perfect: {perfect_matches}/{actual_samples}"
     )
 
     # Show a few examples
@@ -279,7 +356,7 @@ def evaluate_model(
         "cer": cer_score,
         "wer": wer_score,
         "perfect_matches": perfect_matches,
-        "total_samples": num_samples,
+        "total_samples": actual_samples,
     }
 
 
@@ -469,6 +546,24 @@ def main():
         "--test-split", type=str, default="train[95%:]", help="Test split specification"
     )
 
+    # Streaming arguments
+    parser.add_argument(
+        "--streaming",
+        action="store_true",
+        help="Use dataset streaming to reduce memory usage (Note: uses full training set, ignores train-split percentages)",
+    )
+    parser.add_argument(
+        "--shuffle-buffer-size",
+        type=int,
+        default=10000,
+        help="Buffer size for shuffling when using streaming (default: 10000)",
+    )
+    parser.add_argument(
+        "--max-train-samples",
+        type=int,
+        help="Maximum number of training samples when streaming (useful for quick experiments)",
+    )
+
     # Model arguments
     parser.add_argument(
         "--model-id",
@@ -554,6 +649,11 @@ def main():
         "--max-samples", type=int, help="Limit number of training samples (for testing)"
     )
     parser.add_argument("--seed", type=int, default=42, help="Random seed")
+    parser.add_argument(
+        "--max-steps",
+        type=int,
+        help="Maximum number of training steps (auto-calculated for streaming if not specified)"
+    )
 
     args = parser.parse_args()
 
@@ -585,18 +685,63 @@ def main():
 
     # Load dataset
     logger.info(f"Loading dataset: {args.dataset_id}/{args.subset}")
-    train_ds = load_dataset(args.dataset_id, args.subset, split=args.train_split)
-    val_ds = load_dataset(args.dataset_id, args.subset, split=args.val_split)
-    test_ds = load_dataset(args.dataset_id, args.subset, split=args.test_split)
-
-    # Limit samples if requested
-    if args.max_samples:
-        train_ds = train_ds.select(range(min(args.max_samples, len(train_ds))))
-        logger.info(f"Limited training to {len(train_ds)} samples")
-
-    logger.info(
-        f"Dataset sizes - Train: {len(train_ds)}, Val: {len(val_ds)}, Test: {len(test_ds)}"
-    )
+
+    if args.streaming:
+        logger.info("Using streaming mode for dataset loading")
+        # For streaming, we can only use "train" split, not percentage-based splits
+        # Load the full training set in streaming mode
+        train_ds = load_dataset(
+            args.dataset_id, args.subset, split="train", streaming=True
+        )
+
+        # For validation and test, we need to load a subset of the data
+        # We'll use the last 15% of the data for validation and test
+        # Load the full dataset for splitting into val/test
+        full_ds = load_dataset(args.dataset_id, args.subset, split="train")
+        total_size = len(full_ds)
+
+        # Calculate split indices
+        train_end = int(0.85 * total_size)
+        val_end = int(0.95 * total_size)
+
+        # Create validation and test splits
+        val_ds = full_ds.select(range(train_end, val_end))
+        test_ds = full_ds.select(range(val_end, total_size))
+
+        # Clean up the full dataset to save memory
+        del full_ds
+
+        # Apply shuffling with buffer for streaming dataset
+        train_ds = train_ds.shuffle(
+            seed=args.seed, buffer_size=args.shuffle_buffer_size
+        )
+
+        # Limit samples if requested (for streaming)
+        if args.max_samples or args.max_train_samples:
+            max_samples = args.max_samples or args.max_train_samples
+            train_ds = train_ds.take(max_samples)
+            logger.info(f"Limited training to {max_samples} samples (streaming mode)")
+
+        logger.info(
+            f"Dataset loaded - Training: streaming (full train set), Val: {len(val_ds)}, Test: {len(test_ds)}"
+        )
+        logger.info(
+            "Note: When streaming, using full training set. Use --max-train-samples to limit."
+        )
+    else:
+        # Original non-streaming loading
+        train_ds = load_dataset(args.dataset_id, args.subset, split=args.train_split)
+        val_ds = load_dataset(args.dataset_id, args.subset, split=args.val_split)
+        test_ds = load_dataset(args.dataset_id, args.subset, split=args.test_split)
+
+        # Limit samples if requested (non-streaming)
+        if args.max_samples:
+            train_ds = train_ds.select(range(min(args.max_samples, len(train_ds))))
+            logger.info(f"Limited training to {len(train_ds)} samples")
+
+        logger.info(
+            f"Dataset sizes - Train: {len(train_ds)}, Val: {len(val_ds)}, Test: {len(test_ds)}"
+        )
 
     # Load processor
     logger.info(f"Loading processor from {args.model_id}")
@@ -661,6 +806,7 @@ def main():
         batch_size=args.eval_batch_size,
         device=device,
         description="Base model",
+        is_streaming=False,  # Test dataset is never streamed
     )
     torch.cuda.empty_cache()
 
@@ -669,35 +815,68 @@ def main():
         processor, max_length=args.max_length, longest_edge=args.longest_edge
     )
 
+    # Calculate max_steps for streaming datasets
+    max_steps = None
+    if args.streaming:
+        if args.max_steps:
+            max_steps = args.max_steps
+            logger.info(f"Using user-specified max_steps: {max_steps}")
+        else:
+            # Estimate max_steps based on dataset size and batch configuration
+            if args.max_train_samples:
+                # Calculate based on limited samples
+                steps_per_epoch = args.max_train_samples // (args.batch_size * args.gradient_accumulation)
+                max_steps = steps_per_epoch * args.epochs
+                logger.info(f"Calculated max_steps from max_train_samples: {max_steps}")
+            else:
+                # Use a default reasonable value
+                # Approximate based on typical dataset sizes
+                # Default to 1000 steps per epoch as a reasonable estimate
+                max_steps = 1000 * args.epochs
+                logger.warning(
+                    f"Streaming mode: Using default max_steps={max_steps}. "
+                    f"Consider setting --max-steps or --max-train-samples for precise control."
+                )
+
     # Setup training arguments
-    training_args = TrainingArguments(
-        output_dir=args.output_dir,
-        num_train_epochs=args.epochs,
-        per_device_train_batch_size=args.batch_size,
-        per_device_eval_batch_size=args.eval_batch_size,
-        gradient_accumulation_steps=args.gradient_accumulation,
-        learning_rate=args.learning_rate,
-        weight_decay=0.0,
-        logging_steps=50,
-        eval_strategy="steps",
-        eval_steps=args.eval_steps,
-        save_strategy="steps",
-        save_steps=args.save_steps,
-        save_total_limit=2,
-        load_best_model_at_end=True,
-        metric_for_best_model="eval_loss",
-        bf16=True,
-        fp16=False,
-        remove_unused_columns=False,
-        dataloader_pin_memory=False,
-        gradient_checkpointing=True,
-        optim="adamw_torch_fused" if torch.cuda.is_available() else "adamw_torch",
-        warmup_steps=args.warmup_steps,
-        lr_scheduler_type="linear",
-        push_to_hub=args.push_to_hub,
-        hub_model_id=args.hub_model_id,
-        hub_private_repo=args.private,
-    )
+    # When streaming, use max_steps instead of num_train_epochs
+    training_args_dict = {
+        "output_dir": args.output_dir,
+        "per_device_train_batch_size": args.batch_size,
+        "per_device_eval_batch_size": args.eval_batch_size,
+        "gradient_accumulation_steps": args.gradient_accumulation,
+        "learning_rate": args.learning_rate,
+        "weight_decay": 0.0,
+        "logging_steps": 50,
+        "eval_strategy": "steps",
+        "eval_steps": args.eval_steps,
+        "save_strategy": "steps",
+        "save_steps": args.save_steps,
+        "save_total_limit": 2,
+        "load_best_model_at_end": True,
+        "metric_for_best_model": "eval_loss",
+        "bf16": True,
+        "fp16": False,
+        "remove_unused_columns": False,
+        "dataloader_pin_memory": False,
+        "gradient_checkpointing": True,
+        "optim": "adamw_torch_fused" if torch.cuda.is_available() else "adamw_torch",
+        "warmup_steps": args.warmup_steps,
+        "lr_scheduler_type": "linear",
+        "push_to_hub": args.push_to_hub,
+        "hub_model_id": args.hub_model_id,
+        "hub_private_repo": args.private,
+    }
+
+    # Add either max_steps or num_train_epochs based on streaming mode
+    if args.streaming:
+        training_args_dict["max_steps"] = max_steps
+        # Still set num_train_epochs for model card generation
+        training_args_dict["num_train_epochs"] = args.epochs
+    else:
+        training_args_dict["num_train_epochs"] = args.epochs
+
+    training_args = TrainingArguments(**training_args_dict)
 
     # Use smaller validation set for faster evaluation
     val_ds_small = val_ds.select(range(min(100, len(val_ds))))
@@ -715,7 +894,13 @@ def main():
     logger.info("\n" + "=" * 80)
     logger.info("STARTING TRAINING")
     logger.info("=" * 80)
-    logger.info(f"Training samples: {len(train_ds)}")
+    if args.streaming:
+        logger.info(
+            f"Training samples: streaming mode (max: {args.max_train_samples or 'unlimited'})"
+        )
+        logger.info(f"Max steps: {max_steps}")
+    else:
+        logger.info(f"Training samples: {len(train_ds)}")
     logger.info(f"Validation samples: {len(val_ds_small)}")
     logger.info(f"Effective batch size: {args.batch_size * args.gradient_accumulation}")
 
@@ -738,6 +923,7 @@ def main():
         batch_size=args.eval_batch_size,
         device=device,
         description="Fine-tuned model",
+        is_streaming=False,  # Test dataset is never streamed
     )
 
     # Show comparison
@@ -816,13 +1002,17 @@ if __name__ == "__main__":
         print(
             " uv run lightonocr-finetune.py --freeze-language --output-dir ./model\n"
         )
+        print(" # Stream large datasets (memory-efficient):")
+        print(
+            " uv run lightonocr-finetune.py --streaming --shuffle-buffer-size 10000 --output-dir ./model\n"
+        )
         print(" # Push to Hub:")
         print(
             " uv run lightonocr-finetune.py --hub-model-id username/model --push-to-hub\n"
         )
         print(" # Run on HF Jobs:")
         print(
-            " hf jobs run --gpu l4x1 uv run lightonocr-finetune.py --output-dir ./model"
+            " hf jobs run --gpu l4x1 uv run lightonocr-finetune.py --streaming --output-dir ./model"
        )
         sys.exit(0)
 
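Note on the data format: streamed samples must expose the same fields the collator and evaluator read ("images" and "texts"). A minimal standalone check, separate from the script itself (dataset and subset names taken from the docstring example):

    from datasets import load_dataset

    # Stream one sample without downloading the whole dataset
    ds = load_dataset(
        "HuggingFaceM4/FineVision",
        "olmOCR-mix-0225-books",
        split="train",
        streaming=True,
    )
    sample = next(iter(ds))
    print(type(sample["images"][0]))             # page image fed to the processor
    print(sample["texts"][0]["assistant"][:80])  # ground-truth transcription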