TomerGabay commited on
Commit
caa2ed1
·
verified ·
1 Parent(s): 162bc13

added process_data.py and requirements.txt

Browse files
Files changed (2) hide show
  1. process_data.py +229 -0
  2. requirements.txt +4 -0
process_data.py ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import glob
3
+ import json
4
+ import logging
5
+ import multiprocessing
6
+ import os
7
+ import xml.etree.ElementTree as ET
8
+ from datetime import datetime
9
+ from typing import List, Optional, Set
10
+
11
+ import pandas as pd
12
+ from datasets import Dataset
13
+ from dotenv import load_dotenv
14
+ from monumenten import MonumentenClient
15
+
# Pull environment variables (notably HF_TOKEN) in from a local .env file.
load_dotenv()

# Module-wide logging setup.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Input directory and output file locations.
XML_DIRECTORY = "vbo_xmls/"
INTERMEDIATE_CSV_PATH = "verblijfsobjecten_ids.csv"
FINAL_CSV_PATH = "monumenten.csv"

# Hugging Face target repository and credentials.
HF_REPO_ID = "woonstadrotterdam/monumenten"
HF_TOKEN = os.getenv("HF_TOKEN")
29
+
30
+
def is_valid_identificatie(id_value: Optional[str]) -> bool:
    """
    Validate whether *id_value* is a proper verblijfsobject ID.

    A valid ID is exactly 16 characters, all digits, with '01' at
    positions 4-5 (0-indexed), e.g. '0304010000269586'.

    Parameters
    ----------
    id_value : Optional[str]
        Candidate identifier; ``None`` is accepted and treated as invalid.
        (Annotation widened from ``str`` to ``Optional[str]`` — the body
        explicitly handles ``None``, which XML ``elem.text`` can yield.)

    Returns
    -------
    bool
        True when the value matches the verblijfsobject ID format.
    """
    if id_value is None:
        return False
    return len(id_value) == 16 and id_value.isdigit() and id_value[4:6] == "01"
41
+
42
+
def extract_identificaties(xml_path: str) -> List[str]:
    """
    Collect every valid identificatie value from one XML file.

    Parses the file in streaming fashion (iterparse) so large files are
    never held in memory in full. Any parse error is logged and yields
    an empty list instead of raising.
    """
    identificaties: List[str] = []
    try:
        for _event, element in ET.iterparse(xml_path, events=("end",)):
            if element.tag.endswith("identificatie"):
                value = element.text
                if is_valid_identificatie(value):
                    identificaties.append(value)
            element.clear()  # Free memory

        if identificaties:
            logger.debug(
                f"Found {len(identificaties)} valid identificatie values in {xml_path}"
            )
        return identificaties
    except Exception as e:
        logger.error(f"Error parsing XML file {xml_path}: {e}")
        return []
65
+
66
+
def get_xml_files() -> List[str]:
    """
    Return the paths of all XML files found in XML_DIRECTORY.

    Logs how many files were found, or an error when the directory
    yields no matches; the (possibly empty) list is returned either way.
    """
    pattern = os.path.join(XML_DIRECTORY, "*.xml")
    xml_files = glob.glob(pattern)
    if xml_files:
        logger.info(f"Found {len(xml_files)} XML files in {XML_DIRECTORY}")
    else:
        logger.error(f"No XML files found in {XML_DIRECTORY}")
    return xml_files
77
+
78
+
def process_files_parallel(xml_files: List[str]) -> Set[str]:
    """
    Extract identificaties from all XML files using a process pool.

    Worker results are merged into a single set, so duplicates across
    files collapse automatically. Progress is logged every 100 files.
    Returns the set of unique identificaties.
    """
    unique_identificaties: Set[str] = set()
    total = len(xml_files)

    logger.info(f"Starting parallel processing of {total} XML files...")
    with multiprocessing.Pool() as pool:
        stream = pool.imap_unordered(extract_identificaties, xml_files)
        for done, file_identificaties in enumerate(stream, start=1):
            unique_identificaties.update(file_identificaties)
            if done % 100 == 0:  # Log progress every 100 files
                logger.info(
                    f"Processed {done}/{total} files. "
                    f"Current unique identificaties: {len(unique_identificaties)}"
                )

    logger.info(
        f"All files processed. Total unique identificaties found: {len(unique_identificaties)}"
    )
    return unique_identificaties
101
+
102
+
def create_identificaties_dataframe(unique_ids: Set[str]) -> Optional[pd.DataFrame]:
    """
    Build a one-column DataFrame from the unique identificaties and save
    it to INTERMEDIATE_CSV_PATH.

    The ids are sorted before the DataFrame is built so repeated runs
    produce byte-identical CSV output — iterating a raw ``set`` gives an
    arbitrary, hash-seed-dependent row order.

    Parameters
    ----------
    unique_ids : Set[str]
        Unique verblijfsobject ids gathered from the XML files.

    Returns
    -------
    Optional[pd.DataFrame]
        The DataFrame, or None when no valid identificaties were found.
    """
    if not unique_ids:
        logger.info("No valid identificaties found.")
        return None

    # sorted() for deterministic, reproducible output ordering.
    df = pd.DataFrame(sorted(unique_ids), columns=["bag_verblijfsobject_id"])
    logger.info(f"Created DataFrame with {len(df)} unique valid identificaties.")

    # Save intermediate results
    df.to_csv(INTERMEDIATE_CSV_PATH, index=False)
    logger.info(f"Saved DataFrame to {INTERMEDIATE_CSV_PATH}")

    # Display info
    print("\nFirst few rows of the extracted identificaties DataFrame:")
    print(df.head())
    print("\nIdentificaties DataFrame Info:")
    df.info()

    return df
126
+
127
+
async def process_with_monumenten_client(df: pd.DataFrame) -> Optional[pd.DataFrame]:
    """
    Enrich the verblijfsobject ids with monument data via MonumentenClient.

    Returns the enriched DataFrame, or None when the input is empty or
    the client raises an error (which is logged, not propagated).
    """
    if df.empty:
        logger.warning("Empty DataFrame provided to MonumentenClient.")
        return None

    logger.info(f"Processing {len(df)} identificaties with MonumentenClient...")
    try:
        async with MonumentenClient() as client:
            enriched = await client.process_from_df(
                df=df, verblijfsobject_id_col="bag_verblijfsobject_id"
            )
            logger.info("Finished processing with MonumentenClient.")
            return enriched
    except Exception as e:
        logger.error(f"Error processing with MonumentenClient: {e}")
        return None
148
+
149
+
def save_final_results(result_df: Optional[pd.DataFrame]) -> None:
    """
    Save the final DataFrame to FINAL_CSV_PATH and push it to the
    Hugging Face hub, reporting each outcome on the console.

    A None input means the pipeline failed; an empty DataFrame means it
    ran but produced nothing — both are reported without saving.
    """
    if result_df is None:
        logger.warning("No valid data to save. Process did not complete successfully.")
        print("\nProcess did not complete successfully or returned no data.")
        return

    if result_df.empty:
        logger.info("Processing resulted in an empty DataFrame. Nothing to save.")
        print("\nProcessing resulted in an empty DataFrame.")
        return

    result_df.to_csv(FINAL_CSV_PATH, index=False)
    logger.info(f"Successfully saved final monumenten data to {FINAL_CSV_PATH}")
    print(f"\nFinal data saved to {FINAL_CSV_PATH}")
    print(result_df.head())

    # Push to Hugging Face
    if push_to_huggingface(result_df):
        print(f"\nData successfully pushed to Hugging Face dataset: {HF_REPO_ID}")
    else:
        print("\nFailed to push data to Hugging Face. Check logs for details.")
170
+
171
+
def push_to_huggingface(result_df: pd.DataFrame) -> bool:
    """
    Push the final results to the Hugging Face datasets hub via
    ``Dataset.push_to_hub``.

    Parameters
    ----------
    result_df : pd.DataFrame
        The final monumenten DataFrame to publish.

    Returns
    -------
    bool
        True when the dataset was pushed successfully; False when the
        HF token is missing, the DataFrame is empty, or the push failed.
        (The original version never returned True — it fell off the end
        and returned None, so callers treated every successful push as a
        failure.)
    """
    if not HF_TOKEN:
        logger.error("No Hugging Face token found in environment variables (HF_TOKEN)")
        return False

    if result_df.empty:
        logger.warning(
            "Result DataFrame is empty. Skipping push of main dataset to Hugging Face."
        )
        # Nothing was pushed, so this is not a success.
        return False

    logger.info(
        f"Converting DataFrame with {len(result_df)} rows to Hugging Face Dataset."
    )

    hf_dataset_single = Dataset.from_pandas(result_df)

    try:
        hf_dataset_single.push_to_hub(
            repo_id=HF_REPO_ID,
            commit_message="Update monumenten dataset",  # plain string; was an f-string with no placeholder
            token=HF_TOKEN,
        )
    except Exception as e:
        # Honor the documented "False otherwise" contract instead of raising.
        logger.error(f"Failed to push dataset to {HF_REPO_ID}: {e}")
        return False

    logger.info(f"Successfully pushed dataset dictionary to {HF_REPO_ID}")
    return True
199
+
200
+
async def main() -> Optional[pd.DataFrame]:
    """
    Orchestrate the full pipeline: locate XML files, extract the unique
    verblijfsobject ids, and enrich them with monument data.

    Returns the final processed DataFrame, or None as soon as any stage
    yields nothing to continue with.
    """
    xml_files = get_xml_files()
    if not xml_files:
        return None

    # Fan the XML parsing out over all cores, dedupe into one set.
    unique_identificaties = process_files_parallel(xml_files)

    df = create_identificaties_dataframe(unique_identificaties)
    if df is None:
        return None

    # Enrich with MonumentenClient and hand the result back to the caller.
    return await process_with_monumenten_client(df)
222
+
223
+
if __name__ == "__main__":
    # Run the async pipeline, then persist/publish whatever it produced
    # (save_final_results handles the None / empty cases itself).
    final_dataframe = asyncio.run(main())
    save_final_results(final_dataframe)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ huggingface-hub
2
+ python-dotenv
3
+ monumenten==0.3.1
4
+ datasets