# Build the processed FAA dataset from the raw S3 dump.
#
# Reads every *.txt file under `s3_data/`, splits each file into individual
# XML records, de-duplicates them, sorts them chronologically by their
# embedded update time, and writes the result both as a plain-text file
# (one XML record per line) and as a JSONL file suitable for uploading
# to Hugging Face.

import os
import json
from datetime import datetime

# Markers around the update-time value inside each XML record.
# NOTE(review): both marker strings were empty in this copy of the notebook
# (garbled during extraction), which makes find() return 0 on both ends and
# strptime() fail on the empty slice. `<Update_Time>` matches the FAA
# airport-status XML schema and the `Wed Jul 10 17:18:22 2024 GMT` format
# referenced in the original comment — confirm against the raw data before
# a full run.
UPDATE_TIME_OPEN = "<Update_Time>"
UPDATE_TIME_CLOSE = "</Update_Time>"


def extract_update_time(xml_string: str) -> datetime:
	"""Parse a record's update time, e.g. 'Wed Jul 10 17:18:22 2024 GMT'.

	Returns a naive datetime; the source timestamps are GMT, so callers
	treat the result as UTC.
	"""
	start = xml_string.find(UPDATE_TIME_OPEN) + len(UPDATE_TIME_OPEN)
	end = xml_string.find(UPDATE_TIME_CLOSE)
	date_str = xml_string[start:end].strip()
	# %Z accepts literal 'GMT'/'UTC'; the parsed datetime stays naive.
	return datetime.strptime(date_str, "%a %b %d %H:%M:%S %Y %Z")


# Output directory. exist_ok avoids the racy exists()-then-makedirs pattern.
os.makedirs('data/faa', exist_ok=True)

# Collect every XML record from every raw dump file.
xml_data_strings = []
for file in os.listdir('s3_data'):
	# Only the .txt dumps are relevant.
	if not file.endswith('.txt'):
		continue
	with open(f's3_data/{file}', 'r') as f:
		content = f.read()
	# Records within one file are joined by this sentinel. A file with no
	# sentinel is simply a single record — the loop handles it uniformly,
	# so no len(parts) == 1 special case is needed.
	for part in content.split('<><><><><>'):
		# Each record carries a '---'-separated prefix; the XML payload is
		# the piece at index 1 (between the first and second separators).
		xml_data_strings.append(part.split('---')[1].strip())

# Drop exact-duplicate records, then order chronologically by update time.
xml_data_strings = sorted(set(xml_data_strings), key=extract_update_time)

# Plain-text output: one XML record per line.
with open('data/faa/faa_xml_data.txt', 'w') as f:
	for xml_data in xml_data_strings:
		f.write(xml_data + '\n')

# JSONL output (one JSON object per line) — well supported by Hugging Face
# datasets.
with open('data/faa/faa_xml_data.jsonl', 'w') as f:
	for xml_data in xml_data_strings:
		# The timestamp is naive GMT; append 'Z' to mark it as UTC.
		update_time = extract_update_time(xml_data).isoformat()
		json_record = {
			"update_time": update_time + "Z",
			"xml": xml_data,
		}
		f.write(json.dumps(json_record) + '\n')

print(f"Wrote {len(xml_data_strings)} records to data/faa/faa_xml_data.txt")
print(f"Wrote {len(xml_data_strings)} records to data/faa/faa_xml_data.jsonl")

# Free the (potentially large) list — kernel memory persists across cells.
del xml_data_strings