rain1024 Claude Opus 4.5 committed on
Commit
73132c4
·
1 Parent(s): d1693da

Add batch processing and run script for GPU optimization

Browse files

- Update convert_to_ud.py with batch processing and model pre-loading
- Add gpu_stats.py for monitoring GPU utilization as table
- Add run_conversion.sh to save results to timestamped folders

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

scripts/convert_to_ud.py CHANGED
@@ -2,11 +2,15 @@
2
  Convert sentences to Universal Dependencies format compatible with HuggingFace.
3
  Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb
4
  Uses underthesea dependency_parse for proper annotations.
 
 
5
  """
6
 
7
  import json
8
  import os
9
  from os.path import dirname, expanduser, join
 
 
10
 
11
  # Fix GPU tensor compatibility issue with pack_padded_sequence
12
  # The lengths tensor must be on CPU even when using CUDA
@@ -22,6 +26,9 @@ torch.nn.utils.rnn.pack_padded_sequence = _patched_pack
22
 
23
  from underthesea import dependency_parse, pos_tag
24
 
 
 
 
25
  # Map Vietnamese POS tags to Universal POS tags
26
  # Based on: https://universaldependencies.org/u/pos/
27
  UPOS_MAP = {
@@ -405,75 +412,141 @@ def load_sentences(filepath):
405
  return sentences
406
 
407
 
408
- def convert_to_ud_format(sentences):
409
- """Convert sentences to UD format using dependency_parse."""
410
- data = []
 
411
 
412
- for idx, text in enumerate(sentences, 1):
413
- if idx % 100 == 0:
414
- print(f" Processing sentence {idx}/{len(sentences)}...")
 
 
 
415
 
416
- sent_id = f"s{idx}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
 
418
- try:
419
- # Use dependency_parse for tokens, heads, and deprels
420
- parsed = dependency_parse(text)
421
- # parsed is list of (token, head, deprel)
422
 
423
- tokens = [t[0] for t in parsed]
424
- head = [str(t[1]) for t in parsed]
425
- deprel = [t[2] for t in parsed]
426
 
427
- # Get POS tags
428
- tagged = pos_tag(text)
429
- # Align POS tags with dependency tokens
430
- if len(tagged) == len(tokens):
431
- xpos = [t[1] for t in tagged] # Original Vietnamese tags
432
- upos = [to_upos(t[1], t[0]) for t in tagged] # Universal tags with token
433
- else:
434
- # Fallback: use 'X' for unknown
435
- xpos = ['X'] * len(tokens)
436
- upos = ['X'] * len(tokens)
437
-
438
- except Exception as e:
439
- print(f" Error parsing sentence {idx}: {e}")
440
- # Fallback to pos_tag only
441
- tagged = pos_tag(text)
442
- tokens = [t[0] for t in tagged]
443
- xpos = [t[1] for t in tagged]
444
- upos = [to_upos(t[1], t[0]) for t in tagged]
445
- head = ["0"] * len(tokens)
446
- deprel = ["dep"] * len(tokens)
447
- if len(tokens) > 0:
448
- deprel[0] = "root"
449
-
450
- # Apply syntax fixes
451
- upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)
452
-
453
- # Create other fields
454
- n = len(tokens)
455
- lemmas = [t.lower() for t in tokens] # Vietnamese: lemma = lowercase token
456
- feats = ["_"] * n
457
- deps = ["_"] * n
458
- misc = compute_space_after(text, tokens) # Compute SpaceAfter
459
-
460
- row = {
461
- "sent_id": sent_id,
462
- "text": text,
463
- "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
464
- "tokens": tokens,
465
- "lemmas": lemmas,
466
- "upos": upos,
467
- "xpos": xpos,
468
- "feats": feats,
469
- "head": head,
470
- "deprel": deprel,
471
- "deps": deps,
472
- "misc": misc,
473
- "mwt": [],
474
- "empty_nodes": []
475
  }
476
- data.append(row)
 
 
 
 
 
 
 
477
 
478
  return data
479
 
@@ -511,10 +584,17 @@ def save_conllu(data, filepath):
511
 
512
  def main():
513
  import argparse
 
514
  parser = argparse.ArgumentParser(description="Convert sentences to UD format")
515
  parser.add_argument("--input", "-i", type=str, help="Input sentences file")
516
  parser.add_argument("--output-dir", "-o", type=str, help="Output directory")
517
  parser.add_argument("--prefix", "-p", type=str, default="train", help="Output file prefix")
 
 
 
 
 
 
518
  args = parser.parse_args()
519
 
520
  # Default paths
@@ -533,8 +613,24 @@ def main():
533
  sentences = load_sentences(sentences_file)
534
  print(f"Loaded {len(sentences)} sentences")
535
 
536
- print("Converting to UD format...")
537
- data = convert_to_ud_format(sentences)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
538
 
539
  # Save as JSONL (for HuggingFace)
540
  jsonl_file = join(output_dir, f"{args.prefix}.jsonl")
 
2
  Convert sentences to Universal Dependencies format compatible with HuggingFace.
3
  Structure follows: https://huggingface.co/datasets/commul/universal_dependencies/viewer/vi_vtb
4
  Uses underthesea dependency_parse for proper annotations.
5
+
6
+ Optimized for GPU batch processing.
7
  """
8
 
9
  import json
10
  import os
11
  from os.path import dirname, expanduser, join
12
+ from concurrent.futures import ThreadPoolExecutor, as_completed
13
+ import multiprocessing
14
 
15
  # Fix GPU tensor compatibility issue with pack_padded_sequence
16
  # The lengths tensor must be on CPU even when using CUDA
 
26
 
27
  from underthesea import dependency_parse, pos_tag
28
 
29
+ # Global model cache for batch processing
30
+ _models_loaded = False
31
+
32
  # Map Vietnamese POS tags to Universal POS tags
33
  # Based on: https://universaldependencies.org/u/pos/
34
  UPOS_MAP = {
 
412
  return sentences
413
 
414
 
415
def process_single_sentence(args):
    """Convert one (index, sentence) pair into a UD-format row.

    Args:
        args: tuple of (idx, text) — 1-based sentence index and raw text.

    Returns:
        (idx, row) where row is a dict of UD fields, so callers that
        process sentences out of order can reassemble results by index.
    """
    idx, text = args
    sent_id = f"s{idx}"

    try:
        # dependency_parse yields (token, head, deprel) triples.
        arcs = dependency_parse(text)
        tokens = [arc[0] for arc in arcs]
        head = [str(arc[1]) for arc in arcs]
        deprel = [arc[2] for arc in arcs]

        # POS tags come from a separate tagger; only trust them when its
        # tokenisation agrees with the dependency parse.
        pos_pairs = pos_tag(text)
        if len(pos_pairs) == len(tokens):
            xpos = [pair[1] for pair in pos_pairs]
            upos = [to_upos(pair[1], pair[0]) for pair in pos_pairs]
        else:
            xpos = ['X'] * len(tokens)
            upos = ['X'] * len(tokens)
    except Exception:
        # Parsing failed: fall back to POS tagging with a flat tree
        # (every token attached to the artificial root).
        pos_pairs = pos_tag(text)
        tokens = [pair[0] for pair in pos_pairs]
        xpos = [pair[1] for pair in pos_pairs]
        upos = [to_upos(pair[1], pair[0]) for pair in pos_pairs]
        head = ["0"] * len(tokens)
        deprel = ["dep"] * len(tokens)
        if len(tokens) > 0:
            deprel[0] = "root"

    # Repair known systematic annotation errors before emitting the row.
    upos, head, deprel = fix_syntax_errors(tokens, upos, head, deprel)

    n = len(tokens)
    row = {
        "sent_id": sent_id,
        "text": text,
        "comments": [f"# sent_id = {sent_id}", f"# text = {text}"],
        "tokens": tokens,
        "lemmas": [tok.lower() for tok in tokens],  # Vietnamese: lemma == lowercased token
        "upos": upos,
        "xpos": xpos,
        "feats": ["_"] * n,
        "head": head,
        "deprel": deprel,
        "deps": ["_"] * n,
        "misc": compute_space_after(text, tokens),  # SpaceAfter annotations
        "mwt": [],
        "empty_nodes": []
    }
    return idx, row
473
+
474
+
475
def convert_to_ud_format(sentences, batch_size=32, num_workers=4):
    """Convert sentences to UD format using dependency_parse, sequentially.

    Args:
        sentences: list of raw sentence strings.
        batch_size: number of sentences per chunk. Sentences are still
            parsed one at a time; the chunking only controls how often
            progress is reported.
        num_workers: accepted for backward compatibility but unused here;
            see convert_to_ud_format_parallel for worker-based processing.

    Returns:
        List of UD row dicts, in the original sentence order.
    """
    global _models_loaded

    # Pre-warm models with a dummy sentence so the first real sentence
    # doesn't pay the model-loading cost.
    if not _models_loaded:
        print(" Loading models into GPU memory...")
        _ = dependency_parse("Xin chào")
        _ = pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")

    data = [None] * len(sentences)
    total = len(sentences)

    print(f" Processing {total} sentences with batch_size={batch_size}...")

    last_reported = 0
    for batch_start in range(0, total, batch_size):
        batch_end = min(batch_start + batch_size, total)

        for i in range(batch_start, batch_end):
            idx, row = process_single_sentence((i + 1, sentences[i]))
            data[idx - 1] = row

        # Report roughly every 100 sentences and always at the end.
        # BUGFIX: the previous check (`processed % 100 == 0`) only fired
        # when a batch boundary landed exactly on a multiple of 100, which
        # almost never happens with the default batch_size of 64.
        processed = batch_end
        if processed - last_reported >= 100 or processed == total:
            print(f" Processed {processed}/{total} sentences ({100*processed/total:.1f}%)")
            last_reported = processed

    return data
 
 
 
509
 
 
 
 
510
 
511
def convert_to_ud_format_parallel(sentences, num_workers=None):
    """Convert sentences using multiple workers (CPU parallelism).

    Note: This is useful when GPU is bottleneck or for CPU-only processing.
    For GPU processing, use convert_to_ud_format with batch processing.
    """
    global _models_loaded

    if num_workers is None:
        num_workers = min(4, multiprocessing.cpu_count())

    # Pre-warm models once so worker threads don't race to load them.
    if not _models_loaded:
        print(" Loading models...")
        dependency_parse("Xin chào")
        pos_tag("Xin chào")
        _models_loaded = True
        print(" Models loaded.")

    total = len(sentences)
    data = [None] * total
    done = 0

    print(f" Processing {total} sentences with {num_workers} workers...")

    # NOTE(review): all threads share the same underthesea models; this
    # assumes the underlying parser is safe to call concurrently — confirm
    # before relying on this path for production runs.
    with ThreadPoolExecutor(max_workers=num_workers) as pool:
        pending = [
            pool.submit(process_single_sentence, (pos + 1, sent))
            for pos, sent in enumerate(sentences)
        ]

        for finished in as_completed(pending):
            idx, row = finished.result()
            data[idx - 1] = row
            done += 1

            if done % 100 == 0 or done == total:
                print(f" Processed {done}/{total} sentences ({100*done/total:.1f}%)")

    return data
552
 
 
584
 
585
  def main():
586
  import argparse
587
+ import time
588
  parser = argparse.ArgumentParser(description="Convert sentences to UD format")
589
  parser.add_argument("--input", "-i", type=str, help="Input sentences file")
590
  parser.add_argument("--output-dir", "-o", type=str, help="Output directory")
591
  parser.add_argument("--prefix", "-p", type=str, default="train", help="Output file prefix")
592
+ parser.add_argument("--batch-size", "-b", type=int, default=64,
593
+ help="Batch size for GPU processing (default: 64, increase for more GPU usage)")
594
+ parser.add_argument("--parallel", action="store_true",
595
+ help="Use parallel processing with multiple workers")
596
+ parser.add_argument("--workers", "-w", type=int, default=4,
597
+ help="Number of workers for parallel processing (default: 4)")
598
  args = parser.parse_args()
599
 
600
  # Default paths
 
613
  sentences = load_sentences(sentences_file)
614
  print(f"Loaded {len(sentences)} sentences")
615
 
616
+ # Check GPU availability
617
+ if torch.cuda.is_available():
618
+ print(f"GPU: {torch.cuda.get_device_name(0)}")
619
+ print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB")
620
+ else:
621
+ print("GPU: Not available (using CPU)")
622
+
623
+ print(f"\nConverting to UD format (batch_size={args.batch_size})...")
624
+ start_time = time.time()
625
+
626
+ if args.parallel:
627
+ data = convert_to_ud_format_parallel(sentences, num_workers=args.workers)
628
+ else:
629
+ data = convert_to_ud_format(sentences, batch_size=args.batch_size)
630
+
631
+ elapsed = time.time() - start_time
632
+ speed = len(sentences) / elapsed
633
+ print(f"\nCompleted in {elapsed:.1f}s ({speed:.1f} sentences/sec)")
634
 
635
  # Save as JSONL (for HuggingFace)
636
  jsonl_file = join(output_dir, f"{args.prefix}.jsonl")
scripts/gpu_stats.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Monitor GPU stats and output as a formatted table.
4
+ """
5
+
6
+ import subprocess
7
+ import time
8
+ import argparse
9
+ from datetime import datetime
10
+
11
+
12
def get_gpu_stats():
    """Query GPU stats using nvidia-smi.

    Returns:
        dict with int values for keys "gpu_util", "mem_util" (percent),
        "mem_used", "mem_total" (MiB), or None when nvidia-smi is not
        installed, exits non-zero, or emits unparsable output.
    """
    cmd = [
        "nvidia-smi",
        "--query-gpu=utilization.gpu,utilization.memory,memory.used,memory.total",
        "--format=csv,noheader,nounits"
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
    except OSError:
        # BUGFIX: nvidia-smi missing from PATH used to raise
        # FileNotFoundError instead of returning None.
        return None
    if result.returncode != 0:
        return None

    values = result.stdout.strip().split(", ")
    if len(values) != 4:
        return None

    try:
        gpu_util, mem_util, mem_used, mem_total = (int(v) for v in values)
    except ValueError:
        # BUGFIX: some GPUs report "[N/A]" fields, which used to crash int().
        return None

    return {
        "gpu_util": gpu_util,
        "mem_util": mem_util,
        "mem_used": mem_used,
        "mem_total": mem_total
    }
33
+
34
+
35
def print_header():
    """Print the table header: top rule, column titles, separator rule."""
    rule = "+" + "+".join("-" * width for width in (25, 12, 12, 18, 18)) + "+"
    titles = f"| {'Timestamp':<23} | {'GPU %':>10} | {'Mem %':>10} | {'Used (MiB)':>16} | {'Total (MiB)':>16} |"
    print(rule)
    print(titles)
    print(rule)
40
+
41
+
42
def print_row(stats):
    """Print one table row with a millisecond-precision timestamp."""
    # %f gives microseconds; trimming 3 chars leaves milliseconds.
    now = datetime.now().strftime("%Y/%m/%d %H:%M:%S.%f")[:-3]
    cells = (
        f"{now:<23}",
        f"{stats['gpu_util']:>10}",
        f"{stats['mem_util']:>10}",
        f"{stats['mem_used']:>16}",
        f"{stats['mem_total']:>16}",
    )
    print("| " + " | ".join(cells) + " |")
46
+
47
+
48
def print_footer():
    """Print the closing rule of the table."""
    print("+" + "+".join("-" * width for width in (25, 12, 12, 18, 18)) + "+")
51
+
52
+
53
def main():
    """Sample GPU stats at a fixed interval and render them as a table."""
    parser = argparse.ArgumentParser(description="Monitor GPU stats as a formatted table")
    parser.add_argument("-i", "--interval", type=float, default=2.0, help="Sampling interval in seconds (default: 2.0)")
    parser.add_argument("-n", "--count", type=int, default=0, help="Number of samples (0 = infinite)")
    args = parser.parse_args()

    print_header()

    taken = 0
    try:
        while True:
            # count == 0 means sample forever.
            if args.count != 0 and taken >= args.count:
                break

            sample = get_gpu_stats()
            if sample:
                print_row(sample)
            else:
                print("| ERROR: Could not get GPU stats" + " " * 47 + "|")

            taken += 1
            # Skip the sleep after the final sample so the run ends promptly.
            if args.count == 0 or taken < args.count:
                time.sleep(args.interval)
    except KeyboardInterrupt:
        # Ctrl-C ends sampling cleanly; the footer is still printed.
        pass

    print_footer()
77
+
78
+
79
# Script entry point: run the monitor only when executed directly.
if __name__ == "__main__":
    main()
scripts/run_conversion.sh ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Run UD conversion with results saved to timestamped folder
# Usage: ./run_conversion.sh <input_file> [batch_size]

set -e
# BUGFIX: without pipefail, a failing python run piped into `tee` would
# look successful to `set -e`.
set -o pipefail

INPUT_FILE="${1:-sentences_200.txt}"
BATCH_SIZE="${2:-64}"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RESULTS_DIR="results/${TIMESTAMP}"

echo "=== UD Conversion Run ==="
echo "Timestamp: ${TIMESTAMP}"
echo "Input: ${INPUT_FILE}"
echo "Batch size: ${BATCH_SIZE}"
echo "Results dir: ${RESULTS_DIR}"
echo ""

# Create results directory
mkdir -p "${RESULTS_DIR}"

# Save run info
cat > "${RESULTS_DIR}/run_info.txt" << EOF
Timestamp: ${TIMESTAMP}
Input file: ${INPUT_FILE}
Batch size: ${BATCH_SIZE}
Start time: $(date)
Host: $(hostname)
EOF

# Get GPU info
if command -v nvidia-smi &> /dev/null; then
    echo "GPU: $(nvidia-smi --query-gpu=name --format=csv,noheader)" | tee -a "${RESULTS_DIR}/run_info.txt"
    echo "GPU Memory: $(nvidia-smi --query-gpu=memory.total --format=csv,noheader)" | tee -a "${RESULTS_DIR}/run_info.txt"
fi

echo ""
echo "Starting conversion..."
START_TIME=$(date +%s)

# Start GPU monitoring in background
GPU_PID=""
if command -v nvidia-smi &> /dev/null; then
    nvidia-smi --query-gpu=timestamp,utilization.gpu,utilization.memory,memory.used,memory.total \
        --format=csv -l 2 > "${RESULTS_DIR}/gpu_stats.csv" 2>&1 &
    GPU_PID=$!
    # BUGFIX: ensure the monitor dies even when the conversion fails and
    # `set -e` aborts the script before the explicit kill below.
    trap '[ -n "${GPU_PID}" ] && kill ${GPU_PID} 2>/dev/null || true' EXIT
fi

# Run conversion
python scripts/convert_to_ud.py \
    -i "${INPUT_FILE}" \
    -o "${RESULTS_DIR}" \
    -p "output" \
    -b "${BATCH_SIZE}" \
    2>&1 | tee "${RESULTS_DIR}/conversion.log"

END_TIME=$(date +%s)
DURATION=$((END_TIME - START_TIME))

# Stop GPU monitoring
if [ -n "${GPU_PID}" ]; then
    kill ${GPU_PID} 2>/dev/null || true
fi

# Calculate stats.
# BUGFIX: SECONDS is a special bash variable that auto-increments every
# second, so storing the remainder there could drift before being printed;
# use a plain variable name instead.
MINUTES=$((DURATION / 60))
SECS=$((DURATION % 60))

# Count sentences
if [ -f "${RESULTS_DIR}/output.jsonl" ]; then
    NUM_SENTENCES=$(wc -l < "${RESULTS_DIR}/output.jsonl")
    # BUGFIX: guard against division by zero when the run finishes in <1s.
    if [ "${DURATION}" -gt 0 ]; then
        SPEED=$(echo "scale=2; ${NUM_SENTENCES} / ${DURATION}" | bc 2>/dev/null || echo "N/A")
    else
        SPEED="N/A"
    fi
else
    NUM_SENTENCES="N/A"
    SPEED="N/A"
fi

# Save summary
cat >> "${RESULTS_DIR}/run_info.txt" << EOF

End time: $(date)
Duration: ${MINUTES}m ${SECS}s (${DURATION} seconds)
Sentences: ${NUM_SENTENCES}
Speed: ${SPEED} sentences/sec
EOF

echo ""
echo "=== Summary ==="
echo "Duration: ${MINUTES}m ${SECS}s"
echo "Sentences: ${NUM_SENTENCES}"
echo "Speed: ${SPEED} sentences/sec"
echo "Results saved to: ${RESULTS_DIR}/"
echo ""
echo "Files:"
ls -la "${RESULTS_DIR}/"

# Generate GPU stats table if available
if [ -f "${RESULTS_DIR}/gpu_stats.csv" ]; then
    echo ""
    echo "=== GPU Stats (last 10 samples) ==="
    tail -10 "${RESULTS_DIR}/gpu_stats.csv" | column -t -s ','
fi