{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "666aeda0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Archive:  /user/bhanucha/recipe_data.zip\n",
      "   creating: /user/bhanucha/data/dataset/\n",
      "  inflating: /user/bhanucha/data/dataset/full_dataset.csv  \n"
     ]
    }
   ],
   "source": [
    "!unzip '/user/bhanucha/recipe_data.zip' -d '/user/bhanucha/data'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "7a4b917b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-04-20 19:46:56.235357: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX512_VNNI\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2024-04-20 19:47:03.051474: I tensorflow/core/util/port.cc:104] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from transformers import BartTokenizer\n",
    "from tqdm import tqdm \n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from transformers import TFBartForConditionalGeneration\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "84ebecc4",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_checkpoint = \"facebook/bart-base\"\n",
    "tokenizer = BartTokenizer.from_pretrained(model_checkpoint)\n",
    "if tokenizer.pad_token is None:\n",
    "    tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})\n",
    "\n",
    "\n",
    "data = pd.read_csv('/user/bhanucha/data/dataset/full_dataset.csv')\n",
    "\n",
    "\n",
    "texts = [\"Ingredients: \" + row['ingredients'] + \" Directions: \" + row['directions'] for _, row in data.iterrows()]"
   ]
  },
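  {
   "cell_type": "markdown",
   "id": "9f1c2a3b",
   "metadata": {},
   "source": [
    "A quick sanity check (an added sketch, not part of the original run): print one concatenated example to confirm the `Ingredients: ... Directions: ...` layout before committing to the 40-minute tokenization pass."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f1c2a3c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added sketch: eyeball one formatted example before tokenizing everything.\n",
    "print(len(texts), \"examples\")\n",
    "print(texts[0][:300])"
   ]
  },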
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "5597cc7e",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Tokenizing Data: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 2231142/2231142 [43:29<00:00, 854.85it/s]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "tokenized_inputs = []\n",
    "for texts_text in tqdm(texts, desc=\"Tokenizing Data\"):\n",
    "    tokenized_input = tokenizer(\n",
    "        texts_text,\n",
    "        padding=\"max_length\",\n",
    "        truncation=True,\n",
    "        max_length=512,\n",
    "        return_tensors=\"np\"\n",
    "    )\n",
    "    tokenized_inputs.append(tokenized_input['input_ids'])\n"
   ]
  },
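  {
   "cell_type": "markdown",
   "id": "4d5e6f70",
   "metadata": {},
   "source": [
    "A hedged alternative (not part of the original run): passing the tokenizer a list of texts amortizes Python-level overhead, and switching to `BartTokenizerFast` would speed things up further. The batch size of 1000 is an arbitrary assumption; the result is the same `(num_recipes, 512)` id matrix."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d5e6f71",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: batched tokenization, assuming the same `texts` and `tokenizer` as above.\n",
    "batch_size = 1000  # arbitrary choice, tune to available memory\n",
    "batched_ids = []\n",
    "for start in tqdm(range(0, len(texts), batch_size), desc=\"Tokenizing (batched)\"):\n",
    "    enc = tokenizer(\n",
    "        texts[start:start + batch_size],\n",
    "        padding=\"max_length\",\n",
    "        truncation=True,\n",
    "        max_length=512,\n",
    "        return_tensors=\"np\"\n",
    "    )\n",
    "    batched_ids.append(enc[\"input_ids\"])\n",
    "train_data_batched = np.concatenate(batched_ids, axis=0)  # same matrix as below"
   ]
  },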
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "00678266",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "train_data = np.concatenate(tokenized_inputs, axis=0)"
   ]
  },
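  {
   "cell_type": "markdown",
   "id": "7a8b9c0d",
   "metadata": {},
   "source": [
    "Memory note (an added aside): at ~2.23M rows of 512 ids each, the matrix is large; if the ids come back as 64-bit integers, that is roughly 9 GB. BART's ~50k-id vocabulary fits comfortably in `int32`, so downcasting halves the footprint."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a8b9c0e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: report the footprint; BART token ids are all < 2**31, so int32 is safe.\n",
    "print(train_data.shape, train_data.dtype, f\"{train_data.nbytes / 1e9:.2f} GB\")\n",
    "train_data_i32 = train_data.astype(np.int32)  # optional: halves the footprint\n",
    "print(\"as int32:\", f\"{train_data_i32.nbytes / 1e9:.2f} GB\")"
   ]
  },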
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "56fcd8cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "np.save('/user/bhanucha/train_data.npy', train_data)"
   ]
  }
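  {
   "cell_type": "markdown",
   "id": "c0ffee01",
   "metadata": {},
   "source": [
    "Reload sketch (added, not part of the original run): `np.load` with `mmap_mode` lets a later training job slice the saved matrix without reading all of it into RAM."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0ffee02",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: memory-map the saved array; slices are read from disk on demand.\n",
    "reloaded = np.load('/user/bhanucha/train_data.npy', mmap_mode='r')\n",
    "print(reloaded.shape)  # expected: (num_recipes, 512)"
   ]
  }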
 ],
 "metadata": {
  "celltoolbar": "Attachments",
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}