{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Data Processing Notebook\n",
"\n",
"The purpose of this notebook is to help me process the RAW S3 data into the right format to upload to Hugging Face."
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"# Import dependencies\n",
"import os\n",
"import json\n",
"from datetime import datetime"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"# Create the output directory `data/faa`.\n",
"# `exist_ok=True` makes this a no-op if it already exists and avoids the\n",
"# check-then-create race of `if not os.path.exists(...)`.\n",
"os.makedirs('data/faa', exist_ok=True)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Wrote 41164 records to data/faa/faa_xml_data.txt\n",
"Wrote 41164 records to data/faa/faa_xml_data.jsonl\n"
]
}
],
"source": [
"# Collect the XML payload from every `.txt` file in the `s3_data` directory.\n",
"xml_data_strings = []\n",
"for file in os.listdir('s3_data'):\n",
"\t# Skip anything that isn't a raw `.txt` dump.\n",
"\tif not file.endswith('.txt'):\n",
"\t\tcontinue\n",
"\t# Read the whole file into memory.\n",
"\twith open(f's3_data/{file}', 'r') as f:\n",
"\t\tcontent = f.read()\n",
"\n",
"\t# Records within a file are separated by the `<><><><><>` marker.\n",
"\t# A file with no marker yields a single-element list from split(),\n",
"\t# so one loop handles both the single- and multi-record cases.\n",
"\tfor part in content.split('<><><><><>'):\n",
"\t\t# Each record is `<header>---<xml>`; keep the XML side.\n",
"\t\tsegments = part.split('---')\n",
"\t\tif len(segments) < 2:\n",
"\t\t\t# Malformed or empty fragment (e.g. from a trailing\n",
"\t\t\t# separator) — skip it instead of raising IndexError.\n",
"\t\t\tcontinue\n",
"\t\txml_data_strings.append(segments[1].strip())\n",
"\n",
"# Delete any duplicate strings in the xml_data_strings list\n",
"xml_data_strings = list(set(xml_data_strings))\n",
"\n",
"# Sort the XML data strings by the `<AIRPORT_STATUS_INFORMATION><Update_Time>Wed Jul 10 17:18:22 2024 GMT</Update_Time>` date\n",
"def extract_update_time(xml_string):\n",
"\t\"\"\"Parse the record's `<Update_Time>` element into a datetime.\"\"\"\n",
"\tstart = xml_string.find(\"<Update_Time>\") + len(\"<Update_Time>\")\n",
"\tend = xml_string.find(\"</Update_Time>\")\n",
"\tdate_str = xml_string[start:end].strip()\n",
"\treturn datetime.strptime(date_str, \"%a %b %d %H:%M:%S %Y %Z\")\n",
"xml_data_strings.sort(key=extract_update_time)\n",
"\n",
"# Write the XML data to a TXT file (one XML record per line)\n",
"with open('data/faa/faa_xml_data.txt', 'w') as f:\n",
"\tfor xml_data in xml_data_strings:\n",
"\t\tf.write(xml_data + '\\n')\n",
"\n",
"# Write the XML data to a JSONL file (JSON Lines format - one JSON object per line)\n",
"# This format is well-supported by Hugging Face datasets\n",
"with open('data/faa/faa_xml_data.jsonl', 'w') as f:\n",
"\tfor xml_data in xml_data_strings:\n",
"\t\t# Extract the update time for convenience\n",
"\t\tupdate_time = extract_update_time(xml_data).isoformat()\n",
"\t\t# Write each record as a JSON object on its own line\n",
"\t\tjson_record = {\n",
"\t\t\t\"update_time\": update_time + \"Z\",\n",
"\t\t\t\"xml\": xml_data\n",
"\t\t}\n",
"\t\tf.write(json.dumps(json_record) + '\\n')\n",
"\n",
"print(f\"Wrote {len(xml_data_strings)} records to data/faa/faa_xml_data.txt\")\n",
"print(f\"Wrote {len(xml_data_strings)} records to data/faa/faa_xml_data.jsonl\")\n",
"\n",
"# Delete the (potentially large) list to free up kernel memory\n",
"del xml_data_strings"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "3.13.0",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}