#!/usr/bin/env python3
"""
Load document data from text files into a DuckDB database.

This script scans the data/document directory for text files and loads them
into a local DuckDB database. Documents are organized by type (sb/hb), congress,
and document number.

Directory structure: data/document/{type}/{congress}/{range}/{TYPE}-{number}.txt
Example: data/document/sb/20/00001-01000/SB-00002.txt
  - type: sb (senate bill) or hb (house bill)
  - congress: 20 (20th congress)
  - document_number: 2
"""

import argparse
import logging
import re
import sys
from datetime import datetime
from pathlib import Path
from typing import Optional

import duckdb

# Add parent directory to path to import schemas and config
sys.path.insert(0, str(Path(__file__).parent.parent))

from schemas.document import DOCUMENT_SCHEMA
from config import DATABASE_PATH, DOCUMENT_DATA_DIR
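
# NOTE: DOCUMENT_SCHEMA lives in schemas/document.py (not shown in this file).
# The interface this script relies on, inferred from the call sites below rather
# than from the schema module itself:
#   DOCUMENT_SCHEMA.table_name              -> target table name ('documents')
#   DOCUMENT_SCHEMA.schema                  -> column definitions (len() = column count)
#   DOCUMENT_SCHEMA.field_order             -> column order, which must match the six
#                                              values built per file in load_documents_to_db()
#   DOCUMENT_SCHEMA.get_create_table_sql()  -> CREATE TABLE statement
#   DOCUMENT_SCHEMA.get_insert_sql()        -> parameterized INSERT statement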


def parse_document_path(file_path: Path) -> dict:
    """
    Parse document file path to extract metadata.

    Args:
        file_path: Path to document file

    Returns:
        Dict with document_type, congress, document_number

    Example:
        data/document/sb/20/00001-01000/SB-00002.txt ->
        {
            'document_type': 'sb',
            'congress': 20,
            'document_number': 2
        }
    """
    # Get parts from path
    parts = file_path.parts

    # Find the document type directory (sb, hb, etc.)
    try:
        # Path structure: data/document/{type}/{congress}/{range}/{TYPE}-{number}.txt
        doc_index = parts.index('document')
        document_type = parts[doc_index + 1].lower()
        congress = int(parts[doc_index + 2])

        # Parse filename: SB-00002.txt -> document_number = 2
        filename = file_path.stem  # Remove .txt extension
        match = re.match(r'^[A-Z]+-(\d+)$', filename)
        if match:
            document_number = int(match.group(1))
        else:
            raise ValueError(f"Cannot parse document number from filename: {filename}")

        return {
            'document_type': document_type,
            'congress': congress,
            'document_number': document_number
        }
    except (ValueError, IndexError) as e:
        raise ValueError(f"Cannot parse document path: {file_path}") from e
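
# Note: the {range} directory component (e.g. 00001-01000) is not parsed above.
# The document number comes from the filename alone, so a file stored in a
# mismatched range bucket would still load under its filename's number.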


def get_document_files(data_dir: Path):
    """Recursively find all .txt files in the document data directory (generator)."""
    return data_dir.rglob('*.txt')


def create_documents_table(conn: duckdb.DuckDBPyConnection):
    """Create the documents table using the explicit schema."""
    create_sql = DOCUMENT_SCHEMA.get_create_table_sql()

    print(f"Creating table '{DOCUMENT_SCHEMA.table_name}' with {len(DOCUMENT_SCHEMA.schema)} columns:")
    print(f"  Fields: {', '.join(DOCUMENT_SCHEMA.field_order)}")

    conn.execute(create_sql)


def load_documents_to_db(
    data_dir: Path,
    db_path: Path,
    export_parquet: bool = False,
    parquet_path: Optional[Path] = None,
    batch_size: int = 1000,
    progress_interval: int = 100
):
    """Load all document text files into the DuckDB database with batch inserts."""
    # Setup error logging
    error_log_path = db_path.parent / 'load_documents_errors.log'
    logging.basicConfig(
        filename=str(error_log_path),
        level=logging.ERROR,
        format='%(asctime)s - %(message)s',
        filemode='w'  # Overwrite previous log
    )

    print(f"Connecting to database: {db_path}")
    print(f"Error log: {error_log_path}")
    print(f"Batch size: {batch_size}")
    conn = duckdb.connect(str(db_path))

    # Create table with explicit schema
    create_documents_table(conn)

    # Build INSERT statement using schema definition
    insert_sql = DOCUMENT_SCHEMA.get_insert_sql()

    # Get all text files
    print("\nScanning for document files...")
    doc_files = list(get_document_files(data_dir))
    total_files = len(doc_files)
    print(f"Found {total_files} files to process")

    # Load data with batch inserts
    print("\nLoading document data...")
    loaded_count = 0
    error_count = 0
    processed_count = 0

    # Batch for accumulating records
    document_batch = []

    # Record the start time for the elapsed-time summaries printed below
    start_time = datetime.now()

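    # Records accumulate in document_batch and are written one transaction per
    # batch: executemany() amortizes per-statement overhead, and a failed batch
    # rolls back as a unit, so a partial batch never lands in the table.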
    def flush_batch():
        """Insert the accumulated batch in a single transaction and commit."""
        nonlocal loaded_count, error_count

        if not document_batch:
            return

        batch_size_to_commit = len(document_batch)
        print(f"  Committing batch of {batch_size_to_commit} records...", end='', flush=True)

        try:
            conn.execute("BEGIN TRANSACTION")
            conn.executemany(insert_sql, document_batch)
            conn.execute("COMMIT")
            loaded_count += batch_size_to_commit
            print(" done!")

        except Exception as e:
            conn.execute("ROLLBACK")
            # The whole batch was rolled back, so count every record in it as an error.
            error_count += batch_size_to_commit
            logging.error(f"Batch insert failed ({batch_size_to_commit} records): {e}")
            print("\n  Warning: Batch insert failed, see error log")

        document_batch.clear()

    # Process files
    try:
        for doc_file in doc_files:
            try:
                # Parse metadata from path
                metadata = parse_document_path(doc_file)

                # Read file content
                with open(doc_file, 'r', encoding='utf-8') as f:
                    content = f.read()

                # Create document ID: {type}-{congress}-{number}
                doc_id = f"{metadata['document_type']}-{metadata['congress']}-{metadata['document_number']}"

                # Build values list
                values = [
                    doc_id,
                    metadata['document_type'],
                    metadata['congress'],
                    metadata['document_number'],
                    str(doc_file),
                    content
                ]
                document_batch.append(values)

            except Exception as e:
                # File processing failed
                error_count += 1
                logging.error(f"{doc_file}: {e}")

            processed_count += 1

            # Flush batch when reaching batch size
            if len(document_batch) >= batch_size:
                flush_batch()

            # Progress indicator
            if processed_count % progress_interval == 0 or processed_count == total_files:
                pct = (processed_count / total_files * 100) if total_files > 0 else 0
                pending = len(document_batch)
                total_loaded = loaded_count + pending
                print(f"  [{pct:5.1f}%] {processed_count}/{total_files} files | "
                      f"{total_loaded} loaded ({loaded_count} committed, {pending} pending) | "
                      f"{error_count} errors")

    except KeyboardInterrupt:
        print("\n\n*** Interrupted by user (Ctrl+C) ***")
        print("Flushing any pending records to database...")
        flush_batch()

        # Calculate time up to interruption
        end_time = datetime.now()
        total_seconds = (end_time - start_time).total_seconds()
        total_minutes = total_seconds / 60

        print("\nPartial load completed:")
        print(f"  Files processed: {processed_count}/{total_files}")
        print(f"  Records loaded: {loaded_count}")
        print(f"  Errors: {error_count}")
        print(f"  Time elapsed: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
        conn.close()
        sys.exit(1)

    # Flush any remaining records
    flush_batch()

    # Calculate total time
    end_time = datetime.now()
    total_seconds = (end_time - start_time).total_seconds()
    total_minutes = total_seconds / 60

    # Show summary
    print(f"\n{'='*60}")
    print("Load complete!")
    print(f"  Total files processed: {processed_count}")
    print(f"  Successfully loaded: {loaded_count}")
    print(f"  Errors: {error_count}")
    print(f"  Total time: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
    if error_count > 0:
        print(f"  Error details logged to: {error_log_path}")

    # Show database stats
    documents_count = conn.execute("SELECT COUNT(*) as total FROM documents").fetchone()
    print(f"  Total documents in database: {documents_count[0]}")
    print(f"{'='*60}")

    # Show sample data
    print("\nSample documents (first 5 rows):")
    sample = conn.execute("""
        SELECT id, document_type, congress, document_number, LENGTH(content) as content_length
        FROM documents
        ORDER BY document_type, congress, document_number
        LIMIT 5
    """).fetchall()

    for row in sample:
        print(f"  {row[0]}: {row[1].upper()}-{row[3]} (Congress {row[2]}, {row[4]} chars)")

    # Show statistics by type
    print("\nDocument statistics by type:")
    stats = conn.execute("""
        SELECT
            document_type,
            COUNT(*) as count,
            MIN(congress) as min_congress,
            MAX(congress) as max_congress,
            MIN(document_number) as min_doc_num,
            MAX(document_number) as max_doc_num
        FROM documents
        GROUP BY document_type
        ORDER BY document_type
    """).fetchall()

    for row in stats:
        print(f"  {row[0].upper()}: {row[1]} documents (Congress {row[2]}-{row[3]}, "
              f"Doc# {row[4]}-{row[5]})")

    # Export to Parquet if requested
    if export_parquet:
        print(f"\n{'='*60}")
        print("Exporting to Parquet format...")
        print(f"  Output: {parquet_path}")

        try:
            conn.execute(f"COPY documents TO '{parquet_path}' (FORMAT PARQUET)")
            if parquet_path.exists():
                file_size = parquet_path.stat().st_size
                file_size_mb = file_size / (1024 * 1024)
                print(f"  Successfully exported documents.parquet ({file_size_mb:.2f} MB)")
            else:
                print("  Warning: Export completed but file not found")
        except Exception as e:
            print(f"  Error exporting: {e}")

        print(f"{'='*60}")

    conn.close()
    print(f"\nDatabase saved to: {db_path}")


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(
        description='Load document data from text files into a DuckDB database',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use default database path (databases/data.duckdb)
  python scripts/load_documents_to_db.py

  # Specify custom database path
  python scripts/load_documents_to_db.py --db-path /path/to/custom.duckdb

  # Use a different data directory
  python scripts/load_documents_to_db.py --data-dir /path/to/document/data

  # Export to Parquet for Hugging Face dataset viewer
  python scripts/load_documents_to_db.py --export-parquet
        """
    )
    parser.add_argument(
        '--db-path',
        type=Path,
        default=DATABASE_PATH,
        help=f'Path to the DuckDB database (default: {DATABASE_PATH})'
    )
    parser.add_argument(
        '--data-dir',
        type=Path,
        default=DOCUMENT_DATA_DIR,
        help=f'Path to the document data directory (default: {DOCUMENT_DATA_DIR})'
    )
    parser.add_argument(
        '--export-parquet',
        action='store_true',
        help='Export the documents table to Parquet format after loading'
    )
    parser.add_argument(
        '--parquet-path',
        type=Path,
        default=Path(__file__).parent.parent / 'databases' / 'documents.parquet',
        help='Path for the exported Parquet file (default: databases/documents.parquet)'
    )
    parser.add_argument(
        '--batch-size',
        type=int,
        default=1000,
        help='Number of records to insert per batch/transaction (default: 1000)'
    )
    parser.add_argument(
        '--progress-interval',
        type=int,
        default=100,
        help='Show progress every N files (default: 100)'
    )

    args = parser.parse_args()

    # Validate data directory exists
    if not args.data_dir.exists():
        print(f"Error: Data directory not found: {args.data_dir}")
        sys.exit(1)

    # Create databases directory if it doesn't exist
    args.db_path.parent.mkdir(parents=True, exist_ok=True)

    load_documents_to_db(
        args.data_dir,
        args.db_path,
        args.export_parquet,
        args.parquet_path,
        args.batch_size,
        args.progress_interval
    )


if __name__ == '__main__':
    main()
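
# Hedged usage sketch (not executed by this script): after a load finishes, the
# table and the optional Parquet export can be inspected from any DuckDB session.
# The paths below assume the defaults noted in the argparse epilog above; adjust
# them if --db-path or --parquet-path were overridden.
#
#   import duckdb
#   conn = duckdb.connect('databases/data.duckdb')
#   conn.execute(
#       "SELECT document_type, COUNT(*) FROM documents GROUP BY document_type"
#   ).fetchall()
#   duckdb.sql("SELECT COUNT(*) FROM read_parquet('databases/documents.parquet')").show()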