{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download the dataset (idempotent: -nc skips the download when the file already exists)\n",
    "!wget -nc https://raw.githubusercontent.com/piyushmakhija5/hinglishNorm/master/dataset/hinglishNorm.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d2e3f40-5a6b-4c7d-8e9f-0a1b2c3d4e5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# All imports in one place (stdlib -> third-party), plus configuration.\n",
    "import json\n",
    "\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "SEED = 42  # fixed seed so the train/eval/test split is reproducible"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
   "metadata": {},
   "outputs": [],
   "source": [
    "df = pd.read_json(\"./hinglishNorm.json\")\n",
    "df.columns"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
   "metadata": {},
   "source": [
    "### Required columns\n",
    "- target_hinglish\n",
    "- source_hindi\n",
    "- parallel_english\n",
    "- annotations\n",
    "- raw_input\n",
    "- alternates\n",
    "\n",
    "> For **hinglishNorm**, only `target_hinglish`, `raw_input` and `annotations` are valid\n",
    "\n",
    "### Mappings\n",
    "- `normalizedText` _=>_ `target_hinglish`\n",
    "- `inputText` _=>_ `raw_input`\n",
    "- `tags` _=>_ `annotations` (after json.loads)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b54fdd52-1ab0-4c84-89e5-0bcb8fcbfbeb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add the schema columns this dataset does not provide\n",
    "df[\"source_hindi\"] = None\n",
    "df[\"parallel_english\"] = None\n",
    "df[\"alternates\"] = None\n",
    "\n",
    "# Remove unnecessary columns\n",
    "df = df.drop(\"id\", axis=1)\n",
    "\n",
    "# Rename columns to the common schema\n",
    "df = df.rename(columns={\n",
    "    \"normalizedText\": \"target_hinglish\",\n",
    "    \"inputText\": \"raw_input\",\n",
    "    \"tags\": \"annotations\",\n",
    "})\n",
    "\n",
    "# Parse the annotations column from string to Python objects.\n",
    "# NOTE(review): the naive quote replacement breaks if any annotation value\n",
    "# contains an apostrophe — confirm against the dataset; ast.literal_eval\n",
    "# would be sturdier for Python-repr strings.\n",
    "df[\"annotations\"] = df[\"annotations\"].map(lambda x: json.loads(x.replace(\"'\", '\"')))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c8d9e0f-1a2b-4c3d-8e4f-5a6b7c8d9e0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 81% train / 9% eval / 10% test; seeded so every run produces the same split\n",
    "_train_eval_df, test_df = train_test_split(df, test_size=0.1, random_state=SEED)\n",
    "train_df, eval_df = train_test_split(_train_eval_df, test_size=0.1, random_state=SEED)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b3c4d5e-6f70-4a1b-9c2d-3e4f5a6b7c8d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# `tables` is the HDF5 backend required by DataFrame.to_hdf.\n",
    "# %pip (not !pip) installs into the kernel's own environment.\n",
    "%pip install -q tables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save each split into one HDF5 file under its own key\n",
    "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
    "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
    "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8908455-c76f-4ee2-9608-b215f6fafa7c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Confirm the round trip through HDF5 preserved every split.\n",
    "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
    "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
    "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
    "\n",
    "# Compare each split individually: the previous chained `==` over booleans\n",
    "# also passed when every length comparison was False.\n",
    "assert len(_train_df) == len(train_df)\n",
    "assert len(_test_df) == len(test_df)\n",
    "assert len(_eval_df) == len(eval_df)\n",
    "assert len(_train_df) + len(_test_df) + len(_eval_df) == len(df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Remove the downloaded raw file; data.h5 is the artifact we keep\n",
    "!rm hinglishNorm.json"
   ]
  }
 ],
 "metadata": {
  "environment": {
   "kernel": "python3",
   "name": "managed-notebooks.m87",
   "type": "gcloud",
   "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
  },
  "kernelspec": {
   "display_name": "Python (Local)",
   "language": "python",
   "name": "local-base"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}