napppy committed on
Commit
e423ac0
·
1 Parent(s): 041296c

feat: add memberships

Browse files
README.md CHANGED
@@ -3,6 +3,8 @@ license: cc0-1.0
3
  configs:
4
  - config_name: persons
5
  data_files: "databases/persons.parquet"
 
 
6
  tags:
7
  - philippines
8
  - politicians
@@ -26,7 +28,18 @@ This dataset currently contains:
26
  - **Last Name**: Person's last name
27
  - **Name Suffix**: Jr., Sr., I, II, III, IV, etc. (if applicable)
28
 
29
- *More entity types (groups, positions, etc.) will be added in the future.*
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  ## Using the Dataset
32
 
@@ -35,6 +48,7 @@ This dataset currently contains:
35
  You can explore the data directly in your browser using the **Dataset Viewer** tab above.
36
 
37
  - Select **"persons"** from the config dropdown to view person records
 
38
  - Additional entity types will appear in the dropdown as they're added
39
 
40
  The data is available in Parquet format for easy viewing and filtering.
@@ -48,6 +62,10 @@ from datasets import load_dataset
48
  persons = load_dataset("bettergovph/raw-philippine-data", "persons")
49
  print(persons['train'][0])
50
 
 
 
 
 
51
  # Future: Load other entity types
52
  # groups = load_dataset("bettergovph/raw-philippine-data", "groups")
53
  ```
@@ -67,6 +85,9 @@ Example queries:
67
  -- Count all persons
68
  SELECT COUNT(*) FROM persons;
69
 
 
 
 
70
  -- Find all persons with "Jr." suffix
71
  SELECT * FROM persons WHERE name_suffix = 'Jr.' LIMIT 10;
72
 
@@ -79,11 +100,37 @@ FROM persons
79
  WHERE name_suffix IS NOT NULL
80
  GROUP BY name_suffix
81
  ORDER BY count DESC;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  ```
83
 
84
  ## Data Sources
85
 
86
- The raw data comes from TOML files in the `data/person/` directory. Each person has their own TOML file with their information.
87
 
88
  ## Regenerating the Dataset
89
 
@@ -95,13 +142,23 @@ pip install -r requirements.txt
95
 
96
  # Load data and export to Parquet
97
  python scripts/load_persons_to_db.py --export-parquet
 
 
 
98
  ```
99
 
100
  This will create:
101
  - `databases/data.duckdb` - DuckDB database for SQL queries
102
- - `databases/persons.parquet` - Parquet file for the Hugging Face dataset viewer
 
 
 
 
 
 
 
103
 
104
- **Note:** Future entity types (groups, positions, etc.) will also generate their own parquet files in the `databases/` folder.
105
 
106
  ## Contributing
107
 
 
3
  configs:
4
  - config_name: persons
5
  data_files: "databases/persons.parquet"
6
+ - config_name: memberships
7
+ data_files: "databases/memberships.parquet"
8
  tags:
9
  - philippines
10
  - politicians
 
28
  - **Last Name**: Person's last name
29
  - **Name Suffix**: Jr., Sr., I, II, III, IV, etc. (if applicable)
30
 
31
+ ### Memberships
32
+ Political party affiliations and positions held by persons, including:
33
+ - **ID**: Unique membership identifier
34
+ - **Person ID**: Links to the person record
35
+ - **Party**: Political party affiliation
36
+ - **Region**: Geographic region (e.g., "National Capital Region", "Region III")
37
+ - **Province**: Province name
38
+ - **Locality**: City or municipality (optional)
39
+ - **Position**: Position held (e.g., "Representative", "Governor", "Mayor")
40
+ - **Year**: Year of the position/membership
41
+
42
+ *More entity types (groups, etc.) will be added in the future.*
43
 
44
  ## Using the Dataset
45
 
 
48
  You can explore the data directly in your browser using the **Dataset Viewer** tab above.
49
 
50
  - Select **"persons"** from the config dropdown to view person records
51
+ - Select **"memberships"** to view political positions and party affiliations
52
  - Additional entity types will appear in the dropdown as they're added
53
 
54
  The data is available in Parquet format for easy viewing and filtering.
 
62
  persons = load_dataset("bettergovph/raw-philippine-data", "persons")
63
  print(persons['train'][0])
64
 
65
+ # Load memberships data
66
+ memberships = load_dataset("bettergovph/raw-philippine-data", "memberships")
67
+ print(memberships['train'][0])
68
+
69
  # Future: Load other entity types
70
  # groups = load_dataset("bettergovph/raw-philippine-data", "groups")
71
  ```
 
85
  -- Count all persons
86
  SELECT COUNT(*) FROM persons;
87
 
88
+ -- Count all memberships
89
+ SELECT COUNT(*) FROM memberships;
90
+
91
  -- Find all persons with "Jr." suffix
92
  SELECT * FROM persons WHERE name_suffix = 'Jr.' LIMIT 10;
93
 
 
100
  WHERE name_suffix IS NOT NULL
101
  GROUP BY name_suffix
102
  ORDER BY count DESC;
103
+
104
+ -- Find all mayors in a specific region
105
+ SELECT p.first_name, p.last_name, m.province, m.locality, m.year
106
+ FROM memberships m
107
+ JOIN persons p ON m.person_id = p.id
108
+ WHERE m.position = 'Mayor'
109
+ AND m.region = 'National Capital Region'
110
+ ORDER BY m.year DESC
111
+ LIMIT 10;
112
+
113
+ -- Count positions by party affiliation
114
+ SELECT party, position, COUNT(*) as count
115
+ FROM memberships
116
+ WHERE party IS NOT NULL
117
+ GROUP BY party, position
118
+ ORDER BY count DESC
119
+ LIMIT 20;
120
+
121
+ -- Find persons with multiple political positions
122
+ SELECT p.first_name, p.last_name, COUNT(*) as position_count
123
+ FROM persons p
124
+ JOIN memberships m ON p.id = m.person_id
125
+ GROUP BY p.id, p.first_name, p.last_name
126
+ HAVING COUNT(*) > 1
127
+ ORDER BY position_count DESC
128
+ LIMIT 10;
129
  ```
130
 
131
  ## Data Sources
132
 
133
+ The raw data comes from TOML files in the `data/person/` directory. Each person has their own TOML file with their information, including an optional `memberships` array that contains their political positions and party affiliations.
134
 
135
  ## Regenerating the Dataset
136
 
 
142
 
143
  # Load data and export to Parquet
144
  python scripts/load_persons_to_db.py --export-parquet
145
+
146
+ # Optional: Use larger batch size for faster loading (recommended for 45k+ files)
147
+ python scripts/load_persons_to_db.py --export-parquet --batch-size 5000
148
  ```
149
 
150
  This will create:
151
  - `databases/data.duckdb` - DuckDB database for SQL queries
152
+ - `databases/persons.parquet` - Persons table in Parquet format
153
+ - `databases/memberships.parquet` - Memberships table in Parquet format
154
+
155
+ The script uses batch inserts for performance and includes:
156
+ - Progress tracking with percentage complete
157
+ - Error logging to `databases/load_errors.log`
158
+ - Total execution time reporting
159
+ - Graceful handling of Ctrl+C interruptions
160
 
161
+ **Note:** Future entity types (groups, etc.) will also generate their own parquet files in the `databases/` folder.
162
 
163
  ## Contributing
164
 
databases/data.duckdb CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:82433f465a60e793068d978c267fc8247e50afe187c3e226f132d3c5ff9385d1
3
- size 19935232
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb25fdfa27ed0962309f0abe2a8983f871098cc9ca82f362cf10ca7626e6321f
3
+ size 16003072
databases/memberships.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3012fe1d67c6b7df46183cde46c424a319553ef79d707a1ec244d4910d27a5e
3
+ size 2665310
scripts/load_persons_to_db.py CHANGED
@@ -11,6 +11,9 @@ import argparse
11
  from pathlib import Path
12
  import tomlkit
13
  import duckdb
 
 
 
14
 
15
  # Add parent directory to path to import schemas and config
16
  sys.path.insert(0, str(Path(__file__).parent.parent))
@@ -52,9 +55,28 @@ def create_memberships_table(conn: duckdb.DuckDBPyConnection):
52
  conn.execute(create_sql)
53
 
54
 
55
- def load_persons_to_db(data_dir: Path, db_path: Path, export_parquet: bool = False, parquet_path: Path = None):
56
- """Load all person TOML files into the DuckDB database."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  print(f"Connecting to database: {db_path}")
 
 
58
  conn = duckdb.connect(str(db_path))
59
 
60
  # Create tables with explicit schema
@@ -65,7 +87,13 @@ def load_persons_to_db(data_dir: Path, db_path: Path, export_parquet: bool = Fal
65
  insert_person_sql = PERSON_SCHEMA.get_insert_sql()
66
  insert_membership_sql = MEMBERSHIP_SCHEMA.get_insert_sql()
67
 
68
- # Load data within a single transaction for performance
 
 
 
 
 
 
69
  print("\nLoading person data...")
70
  loaded_count = 0
71
  error_count = 0
@@ -73,74 +101,130 @@ def load_persons_to_db(data_dir: Path, db_path: Path, export_parquet: bool = Fal
73
  memberships_loaded_count = 0
74
  unknown_fields_seen = set()
75
 
76
- # Start explicit transaction
77
- conn.execute("BEGIN TRANSACTION")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  try:
80
- for toml_file in get_person_toml_files(data_dir):
81
  try:
 
82
  person_data = load_toml_file(toml_file)
 
 
83
 
84
  # Warn about unknown fields (helps catch typos)
85
- # Skip 'memberships' as it's handled separately
86
  for field in person_data.keys():
87
  if field not in PERSON_SCHEMA.schema and field != 'memberships' and field not in unknown_fields_seen:
88
- print(f" Warning: Unknown field '{field}' found in {toml_file.name} (will be ignored)")
89
  unknown_fields_seen.add(field)
90
 
91
- # Build values list in the same order as field_order
92
- # Apply transformation for nested/complex types
93
  values = [
94
  transform_value(field, person_data.get(field), PERSON_SCHEMA.nested_fields)
95
  for field in PERSON_SCHEMA.field_order
96
  ]
 
97
 
98
- # Insert person data
99
- conn.execute(insert_person_sql, values)
100
-
101
- # Extract and insert memberships for this person
102
- person_id = person_data.get('id')
103
- memberships = person_data.get('memberships', [])
104
-
105
  for idx, membership in enumerate(memberships):
106
- # Generate unique ID for membership: person_id + index
107
  membership_id = f"{person_id}_m{idx}"
108
-
109
  membership_values = [
110
  membership_id,
111
  person_id,
112
  membership.get('party'),
113
  membership.get('region'),
114
  membership.get('province'),
115
- membership.get('locality'), # May be None/missing
116
  membership.get('position'),
117
  membership.get('year'),
118
  ]
119
-
120
- conn.execute(insert_membership_sql, membership_values)
121
- memberships_loaded_count += 1
122
-
123
- loaded_count += 1
124
- processed_count += 1
125
-
126
- # Progress indicator
127
- if processed_count % 100 == 0:
128
- print(f" Processed {processed_count} files...")
129
 
130
  except Exception as e:
 
131
  error_count += 1
132
- processed_count += 1
133
- print(f" Error loading {toml_file}: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 
135
- # Commit transaction
136
- conn.execute("COMMIT")
137
- print(" Transaction committed")
138
 
139
- except Exception as e:
140
- # Rollback on error
141
- conn.execute("ROLLBACK")
142
- print(f" Transaction rolled back due to error: {e}")
143
- raise
144
 
145
  # Show summary
146
  print(f"\n{'='*60}")
@@ -148,6 +232,9 @@ def load_persons_to_db(data_dir: Path, db_path: Path, export_parquet: bool = Fal
148
  print(f" Total files processed: {processed_count}")
149
  print(f" Successfully loaded: {loaded_count}")
150
  print(f" Errors: {error_count}")
 
 
 
151
 
152
  # Show database stats
153
  persons_count = conn.execute("SELECT COUNT(*) as total FROM persons").fetchone()
@@ -190,21 +277,34 @@ def load_persons_to_db(data_dir: Path, db_path: Path, export_parquet: bool = Fal
190
  if export_parquet:
191
  print(f"\n{'='*60}")
192
  print("Exporting to Parquet format...")
193
- print(f"Output path: {parquet_path}")
194
 
 
 
 
195
  try:
196
  conn.execute(f"COPY persons TO '{parquet_path}' (FORMAT PARQUET)")
197
-
198
- # Get file size for confirmation
199
  if parquet_path.exists():
200
  file_size = parquet_path.stat().st_size
201
  file_size_mb = file_size / (1024 * 1024)
202
- print(f"✓ Successfully exported to Parquet")
203
- print(f" File size: {file_size_mb:.2f} MB")
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  else:
205
- print("⚠ Export completed but file not found")
206
  except Exception as e:
207
- print(f"✗ Error exporting to Parquet: {e}")
208
 
209
  print(f"{'='*60}")
210
 
@@ -247,13 +347,31 @@ Examples:
247
  parser.add_argument(
248
  '--export-parquet',
249
  action='store_true',
250
- help='Export the persons table to Parquet format after loading'
251
  )
252
  parser.add_argument(
253
  '--parquet-path',
254
  type=Path,
255
  default=Path(__file__).parent.parent / 'databases' / 'persons.parquet',
256
- help='Path for the exported Parquet file (default: databases/persons.parquet)'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
257
  )
258
 
259
  args = parser.parse_args()
@@ -266,7 +384,15 @@ Examples:
266
  # Create databases directory if it doesn't exist
267
  args.db_path.parent.mkdir(parents=True, exist_ok=True)
268
 
269
- load_persons_to_db(args.data_dir, args.db_path, args.export_parquet, args.parquet_path)
 
 
 
 
 
 
 
 
270
 
271
 
272
  if __name__ == '__main__':
 
11
  from pathlib import Path
12
  import tomlkit
13
  import duckdb
14
+ import logging
15
+ from datetime import datetime
16
+ from typing import Tuple, List, Optional
17
 
18
  # Add parent directory to path to import schemas and config
19
  sys.path.insert(0, str(Path(__file__).parent.parent))
 
55
  conn.execute(create_sql)
56
 
57
 
58
+ def load_persons_to_db(
59
+ data_dir: Path,
60
+ db_path: Path,
61
+ export_parquet: bool = False,
62
+ parquet_path: Path = None,
63
+ memberships_parquet_path: Path = None,
64
+ batch_size: int = 1000,
65
+ progress_interval: int = 100
66
+ ):
67
+ """Load all person TOML files into the DuckDB database with batch inserts."""
68
+ # Setup error logging
69
+ error_log_path = db_path.parent / 'load_errors.log'
70
+ logging.basicConfig(
71
+ filename=str(error_log_path),
72
+ level=logging.ERROR,
73
+ format='%(asctime)s - %(message)s',
74
+ filemode='w' # Overwrite previous log
75
+ )
76
+
77
  print(f"Connecting to database: {db_path}")
78
+ print(f"Error log: {error_log_path}")
79
+ print(f"Batch size: {batch_size}")
80
  conn = duckdb.connect(str(db_path))
81
 
82
  # Create tables with explicit schema
 
87
  insert_person_sql = PERSON_SCHEMA.get_insert_sql()
88
  insert_membership_sql = MEMBERSHIP_SCHEMA.get_insert_sql()
89
 
90
+ # Get all TOML files
91
+ print("\nScanning for TOML files...")
92
+ toml_files = list(get_person_toml_files(data_dir))
93
+ total_files = len(toml_files)
94
+ print(f"Found {total_files} files to process")
95
+
96
+ # Load data with parallel processing and batch inserts
97
  print("\nLoading person data...")
98
  loaded_count = 0
99
  error_count = 0
 
101
  memberships_loaded_count = 0
102
  unknown_fields_seen = set()
103
 
104
+ # Batches for accumulating records
105
+ person_batch = []
106
+ membership_batch = []
107
+
108
+ # Track timing for ETA
109
+ start_time = datetime.now()
110
+
111
+ def flush_batches():
112
+ """Helper to insert accumulated batches and commit."""
113
+ nonlocal loaded_count, memberships_loaded_count
114
+
115
+ if not person_batch and not membership_batch:
116
+ return
117
+
118
+ batch_size_to_commit = len(person_batch)
119
+ print(f" Committing batch of {batch_size_to_commit} records...", end='', flush=True)
120
+
121
+ try:
122
+ conn.execute("BEGIN TRANSACTION")
123
+
124
+ # Batch insert persons
125
+ if person_batch:
126
+ conn.executemany(insert_person_sql, person_batch)
127
+ loaded_count += len(person_batch)
128
 
129
+ # Batch insert memberships
130
+ if membership_batch:
131
+ conn.executemany(insert_membership_sql, membership_batch)
132
+ memberships_loaded_count += len(membership_batch)
133
+
134
+ conn.execute("COMMIT")
135
+ print(f" done!")
136
+
137
+ except Exception as e:
138
+ conn.execute("ROLLBACK")
139
+ logging.error(f"Batch insert failed: {e}")
140
+ print(f"\n Warning: Batch insert failed, see error log")
141
+
142
+ person_batch.clear()
143
+ membership_batch.clear()
144
+
145
+ # Process files sequentially
146
  try:
147
+ for toml_file in toml_files:
148
  try:
149
+ # Read and parse TOML file
150
  person_data = load_toml_file(toml_file)
151
+ person_id = person_data.get('id')
152
+ memberships = person_data.get('memberships', [])
153
 
154
  # Warn about unknown fields (helps catch typos)
 
155
  for field in person_data.keys():
156
  if field not in PERSON_SCHEMA.schema and field != 'memberships' and field not in unknown_fields_seen:
157
+ print(f" Warning: Unknown field '{field}' (will be ignored)")
158
  unknown_fields_seen.add(field)
159
 
160
+ # Build values list for person
 
161
  values = [
162
  transform_value(field, person_data.get(field), PERSON_SCHEMA.nested_fields)
163
  for field in PERSON_SCHEMA.field_order
164
  ]
165
+ person_batch.append(values)
166
 
167
+ # Add memberships to batch
 
 
 
 
 
 
168
  for idx, membership in enumerate(memberships):
 
169
  membership_id = f"{person_id}_m{idx}"
 
170
  membership_values = [
171
  membership_id,
172
  person_id,
173
  membership.get('party'),
174
  membership.get('region'),
175
  membership.get('province'),
176
+ membership.get('locality'),
177
  membership.get('position'),
178
  membership.get('year'),
179
  ]
180
+ membership_batch.append(membership_values)
 
 
 
 
 
 
 
 
 
181
 
182
  except Exception as e:
183
+ # File processing failed
184
  error_count += 1
185
+ logging.error(f"{toml_file}: {e}")
186
+
187
+ processed_count += 1
188
+
189
+ # Flush batches when reaching batch size
190
+ if len(person_batch) >= batch_size:
191
+ flush_batches()
192
+
193
+ # Progress indicator
194
+ if processed_count % progress_interval == 0 or processed_count == total_files:
195
+ pct = (processed_count / total_files * 100) if total_files > 0 else 0
196
+ # Show committed + pending records
197
+ pending = len(person_batch)
198
+ total_loaded = loaded_count + pending
199
+ print(f" [{pct:5.1f}%] {processed_count}/{total_files} files | "
200
+ f"{total_loaded} loaded ({loaded_count} committed, {pending} pending) | "
201
+ f"{error_count} errors")
202
+
203
+ except KeyboardInterrupt:
204
+ print("\n\n*** Interrupted by user (Ctrl+C) ***")
205
+ print("Flushing any pending records to database...")
206
+ flush_batches()
207
+
208
+ # Calculate time up to interruption
209
+ end_time = datetime.now()
210
+ total_seconds = (end_time - start_time).total_seconds()
211
+ total_minutes = total_seconds / 60
212
+
213
+ print(f"\nPartial load completed:")
214
+ print(f" Files processed: {processed_count}/{total_files}")
215
+ print(f" Records loaded: {loaded_count}")
216
+ print(f" Errors: {error_count}")
217
+ print(f" Time elapsed: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
218
+ conn.close()
219
+ sys.exit(1)
220
 
221
+ # Flush any remaining records
222
+ flush_batches()
 
223
 
224
+ # Calculate total time
225
+ end_time = datetime.now()
226
+ total_seconds = (end_time - start_time).total_seconds()
227
+ total_minutes = total_seconds / 60
 
228
 
229
  # Show summary
230
  print(f"\n{'='*60}")
 
232
  print(f" Total files processed: {processed_count}")
233
  print(f" Successfully loaded: {loaded_count}")
234
  print(f" Errors: {error_count}")
235
+ print(f" Total time: {total_minutes:.1f} minutes ({total_seconds:.0f} seconds)")
236
+ if error_count > 0:
237
+ print(f" Error details logged to: {error_log_path}")
238
 
239
  # Show database stats
240
  persons_count = conn.execute("SELECT COUNT(*) as total FROM persons").fetchone()
 
277
  if export_parquet:
278
  print(f"\n{'='*60}")
279
  print("Exporting to Parquet format...")
 
280
 
281
+ # Export persons table
282
+ print(f"\nExporting persons table...")
283
+ print(f" Output: {parquet_path}")
284
  try:
285
  conn.execute(f"COPY persons TO '{parquet_path}' (FORMAT PARQUET)")
 
 
286
  if parquet_path.exists():
287
  file_size = parquet_path.stat().st_size
288
  file_size_mb = file_size / (1024 * 1024)
289
+ print(f" ✓ Successfully exported persons.parquet ({file_size_mb:.2f} MB)")
290
+ else:
291
+ print(" ⚠ Export completed but file not found")
292
+ except Exception as e:
293
+ print(f" ✗ Error exporting persons: {e}")
294
+
295
+ # Export memberships table
296
+ print(f"\nExporting memberships table...")
297
+ print(f" Output: {memberships_parquet_path}")
298
+ try:
299
+ conn.execute(f"COPY memberships TO '{memberships_parquet_path}' (FORMAT PARQUET)")
300
+ if memberships_parquet_path.exists():
301
+ file_size = memberships_parquet_path.stat().st_size
302
+ file_size_mb = file_size / (1024 * 1024)
303
+ print(f" ✓ Successfully exported memberships.parquet ({file_size_mb:.2f} MB)")
304
  else:
305
+ print(" ⚠ Export completed but file not found")
306
  except Exception as e:
307
+ print(f" ✗ Error exporting memberships: {e}")
308
 
309
  print(f"{'='*60}")
310
 
 
347
  parser.add_argument(
348
  '--export-parquet',
349
  action='store_true',
350
+ help='Export the persons and memberships tables to Parquet format after loading'
351
  )
352
  parser.add_argument(
353
  '--parquet-path',
354
  type=Path,
355
  default=Path(__file__).parent.parent / 'databases' / 'persons.parquet',
356
+ help='Path for the exported persons Parquet file (default: databases/persons.parquet)'
357
+ )
358
+ parser.add_argument(
359
+ '--memberships-parquet-path',
360
+ type=Path,
361
+ default=Path(__file__).parent.parent / 'databases' / 'memberships.parquet',
362
+ help='Path for the exported memberships Parquet file (default: databases/memberships.parquet)'
363
+ )
364
+ parser.add_argument(
365
+ '--batch-size',
366
+ type=int,
367
+ default=1000,
368
+ help='Number of records to insert per batch/transaction (default: 1000)'
369
+ )
370
+ parser.add_argument(
371
+ '--progress-interval',
372
+ type=int,
373
+ default=100,
374
+ help='Show progress every N files (default: 100)'
375
  )
376
 
377
  args = parser.parse_args()
 
384
  # Create databases directory if it doesn't exist
385
  args.db_path.parent.mkdir(parents=True, exist_ok=True)
386
 
387
+ load_persons_to_db(
388
+ args.data_dir,
389
+ args.db_path,
390
+ args.export_parquet,
391
+ args.parquet_path,
392
+ args.memberships_parquet_path,
393
+ args.batch_size,
394
+ args.progress_interval
395
+ )
396
 
397
 
398
  if __name__ == '__main__':