{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Machine Translation Project (English to Spanish)" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pathlib\n", "import random\n", "import string\n", "import tensorflow.strings as tf_strings\n", "import tensorflow.data as tf_data\n", "import re\n", "from keras.layers import TextVectorization\n", "import keras\n", "import tensorflow as tf\n", "from keras import layers\n", "import json" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Verify access to the GPU" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[name: \"/device:CPU:0\"\n", "device_type: \"CPU\"\n", "memory_limit: 268435456\n", "locality {\n", "}\n", "incarnation: 16791471205212918184\n", "xla_global_id: -1\n", ", name: \"/device:GPU:0\"\n", "device_type: \"GPU\"\n", "memory_limit: 1733715559\n", "locality {\n", " bus_id: 1\n", " links {\n", " }\n", "}\n", "incarnation: 6643307082616730570\n", "physical_device_desc: \"device: 0, name: NVIDIA GeForce RTX 2050, pci bus id: 0000:01:00.0, compute capability: 8.6\"\n", "xla_global_id: 416903419\n", "]\n" ] } ], "source": [ "from tensorflow.python.client import device_lib\n", "print(device_lib.list_local_devices())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Download and prepare the data\n", "source :\"http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip\"" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Downloading data from http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip\n", "2638744/2638744 [==============================] - 11s 4us/step\n" ] } ], "source": [ "text_file = keras.utils.get_file(\n", " fname = \"spa-eng.zip\",\n", " origin = \"http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip\",\n", " extract = True,\n", ")\n", "\n", "text_file = pathlib.Path(text_file).parent / \"spa-eng\" / \"spa.txt\"\n", "\n", "with open(text_file, \"r\") as f:\n", " lines = f.read().split(\"\\n\")[:-1]\n", " \n", "text_pairs = []\n", "\n", "for line in lines:\n", " eng, spa = line.split(\"\\t\")\n", " spa = \"[start] \" + spa + \" [end]\"\n", " text_pairs.append((eng, spa))" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "random.shuffle(text_pairs)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('Please remind me to phone him tomorrow.', '[start] Por favor, recordadme que le llame mañana. [end]')\n", "('These apples taste good.', '[start] Estas manzanas están buenas. [end]')\n", "('Tom is on his own now.', '[start] Tom es independiente ahora. [end]')\n", "('Hey, you want to have a lot of fun? Come with us.', '[start] Oye, ¿quieres entretenerte?, acompáñanos. [end]')\n", "('You have to remain detached.', '[start] Tú tienes que permanecer independiente. 
{ "cell_type": "markdown", "metadata": {}, "source": [ "Split the pairs into training (70%), validation (15%), and test (15%) sets." ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "118964 total pairs\n", "83276 training pairs\n", "17844 validation pairs\n", "17844 test pairs\n" ] } ], "source": [ "num_val_samples = int(0.15 * len(text_pairs))\n", "num_train_samples = len(text_pairs) - 2 * num_val_samples\n", "train_pairs = text_pairs[:num_train_samples]\n", "val_pairs = text_pairs[num_train_samples:num_train_samples + num_val_samples]\n", "test_pairs = text_pairs[num_train_samples + num_val_samples:]\n", "\n", "print(f\"{len(text_pairs)} total pairs\")\n", "print(f\"{len(train_pairs)} training pairs\")\n", "print(f\"{len(val_pairs)} validation pairs\")\n", "print(f\"{len(test_pairs)} test pairs\")" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "# Parameters\n", "# Strip punctuation (plus the Spanish inverted question mark), but keep \"[\" and \"]\"\n", "# so the [start]/[end] tokens survive standardization.\n", "strip_chars = string.punctuation + \"¿\"\n", "strip_chars = strip_chars.replace(\"[\", \"\")\n", "strip_chars = strip_chars.replace(\"]\", \"\")\n", "\n", "vocab_size = 15000\n", "sequence_length = 20\n", "batch_size = 64" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Vectorize the data" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def custom_standardization(input_string):\n", "    lowercase = tf_strings.lower(input_string)\n", "    return tf_strings.regex_replace(lowercase, f\"[{re.escape(strip_chars)}]\", \"\")\n", "\n", "# Vectorization\n", "eng_vectorization = TextVectorization(\n", "    max_tokens = vocab_size,\n", "    output_mode = \"int\",\n", "    output_sequence_length = sequence_length,\n", ")\n", "\n", "# The Spanish sequences get one extra token: they are later split into a decoder\n", "# input (spa[:, :-1]) and a target (spa[:, 1:]), each of length sequence_length.\n", "spa_vectorization = TextVectorization(\n", "    max_tokens = vocab_size,\n", "    output_mode = \"int\",\n", "    output_sequence_length = sequence_length + 1,\n", "    standardize = custom_standardization,\n", ")\n", "\n", "train_eng_texts = [pair[0] for pair in train_pairs]\n", "train_spa_texts = [pair[1] for pair in train_pairs]\n", "\n", "eng_vectorization.adapt(train_eng_texts)\n", "spa_vectorization.adapt(train_spa_texts)\n", "\n", "# Save the vectorization configs and vocabularies so the layers can be rebuilt\n", "# at inference time (the custom standardize callable is not JSON-serializable,\n", "# so it is dropped from the config and must be re-attached when reloading).\n", "eng_vectorization_config = eng_vectorization.get_config()\n", "eng_vectorization_config.pop('standardize', None)\n", "eng_vocab = eng_vectorization.get_vocabulary()\n", "with open('eng_vectorization_config.json', 'w', encoding='utf-8') as f:\n", "    json.dump(eng_vectorization_config, f)\n", "\n", "with open('eng_vocab.json', 'w', encoding='utf-8') as f:\n", "    json.dump(eng_vocab, f)\n", "\n", "spa_vectorization_config = spa_vectorization.get_config()\n", "spa_vectorization_config.pop('standardize', None)\n", "spa_vocab = spa_vectorization.get_vocabulary()\n", "with open('spa_vectorization_config.json', 'w', encoding='utf-8') as f:\n", "    json.dump(spa_vectorization_config, f)\n", "\n", "with open('spa_vocab.json', 'w', encoding='utf-8') as f:\n", "    json.dump(spa_vocab, f)\n", "\n", "\n", "# Teacher forcing: the decoder sees the target shifted right by one step and\n", "# learns to predict the next token at every position.\n", "def format_dataset(eng, spa):\n", "    eng = eng_vectorization(eng)\n", "    spa = spa_vectorization(spa)\n", "    return (\n", "        {\n", "            \"encoder_inputs\": eng,\n", "            \"decoder_inputs\": spa[:, :-1],\n", "        },\n", "        spa[:, 1:],\n", "    )\n", "\n", "def make_dataset(pairs):\n", "    eng_texts, spa_texts = zip(*pairs)\n", "    eng_texts = list(eng_texts)\n", "    spa_texts = list(spa_texts)\n", "    dataset = tf_data.Dataset.from_tensor_slices((eng_texts, spa_texts))\n", "    dataset = dataset.batch(batch_size)\n", "    dataset = dataset.map(format_dataset)\n", "    return dataset.cache().shuffle(2048).prefetch(16)\n", "\n", "train_ds = make_dataset(train_pairs)\n", "val_ds = make_dataset(val_pairs)" ] },
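{ "cell_type": "markdown", "metadata": {}, "source": [ "The configs and vocabularies saved above are enough to rebuild the vectorizers in a later inference session. A minimal sketch (assuming the JSON files written by the previous cell; variable names like `restored_eng_vectorization` are illustrative, and the cell is left unexecuted):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Sketch: rebuild the English vectorizer from its saved config and vocabulary.\n", "with open('eng_vectorization_config.json', 'r', encoding='utf-8') as f:\n", "    restored_config = json.load(f)\n", "with open('eng_vocab.json', 'r', encoding='utf-8') as f:\n", "    restored_vocab = json.load(f)\n", "\n", "restored_eng_vectorization = TextVectorization.from_config(restored_config)\n", "restored_eng_vectorization.set_vocabulary(restored_vocab)\n", "\n", "# The Spanish layer is rebuilt the same way, except that its custom standardize\n", "# callable was popped from the config, so pass standardize=custom_standardization\n", "# when reconstructing it." ] },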
{ "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(64, 20)\n", "(64, 20)\n" ] } ], "source": [ "for inputs, targets in train_ds.take(1):\n", "    print(inputs[\"encoder_inputs\"].shape)\n", "    print(targets.shape)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Model Architecture\n", "![Encoder-Decoder](images/encoder-decoder-context.png)\n", "![Encoder-Decoder](images/encoder-decoder-translation.png)\n", "![Attention Mechanism](images/attention.png)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "# Creating an Encoder\n", "class TransformerEncoder(layers.Layer):\n", "    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):\n", "        super().__init__(**kwargs)\n", "        self.embed_dim = embed_dim\n", "        self.dense_dim = dense_dim\n", "        self.num_heads = num_heads\n", "        self.attention = layers.MultiHeadAttention(\n", "            num_heads = num_heads, key_dim = embed_dim\n", "        )\n", "        self.dense_proj = keras.Sequential(\n", "            [\n", "                layers.Dense(dense_dim, activation = \"relu\"),\n", "                layers.Dense(embed_dim),\n", "            ]\n", "        )\n", "        self.layernorm_1 = layers.LayerNormalization()\n", "        self.layernorm_2 = layers.LayerNormalization()\n", "        self.supports_masking = True\n", "\n", "    def call(self, inputs, mask=None):\n", "        # Broadcast the padding mask over the query axis of the attention scores.\n", "        if mask is not None:\n", "            padding_mask = tf.cast(mask[:, None, :], dtype = tf.int32)\n", "        else:\n", "            padding_mask = None\n", "\n", "        # Self-attention then a position-wise feed-forward block, each wrapped\n", "        # in a residual connection and layer normalization.\n", "        attention_output = self.attention(\n", "            query = inputs,\n", "            value = inputs,\n", "            key = inputs,\n", "            attention_mask = padding_mask,\n", "        )\n", "        proj_input = self.layernorm_1(inputs + attention_output)\n", "        proj_output = self.dense_proj(proj_input)\n", "        return self.layernorm_2(proj_input + proj_output)\n", "\n", "    def get_config(self):\n", "        config = super().get_config()\n", "        config.update({\n", "            \"embed_dim\": self.embed_dim,\n", "            \"dense_dim\": self.dense_dim,\n", "            \"num_heads\": self.num_heads,\n", "        })\n", "        return config\n", "\n", "# Creating a Positional Embedding\n", "class PositionalEmbedding(layers.Layer):\n", "    def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):\n", "        super().__init__(**kwargs)\n", "        self.token_embeddings = layers.Embedding(\n", "            input_dim = vocab_size, output_dim = embed_dim\n", "        )\n", "        self.position_embeddings = layers.Embedding(\n", "            input_dim = sequence_length, output_dim = embed_dim\n", "        )\n", "        self.sequence_length = sequence_length\n", "        self.vocab_size = vocab_size\n", "        self.embed_dim = embed_dim\n", "\n", "    def call(self, inputs):\n", "        # Learned position embeddings are simply added to the token embeddings.\n", "        length = tf.shape(inputs)[-1]\n", "        positions = tf.range(start = 0, limit = length, delta = 1)\n", "        embedded_tokens = self.token_embeddings(inputs)\n", "        embedded_positions = self.position_embeddings(positions)\n", "        return embedded_tokens + embedded_positions\n", "\n", "    def compute_mask(self, inputs, mask=None):\n", "        # Token id 0 is padding; propagate that as the mask.\n", "        if mask is not None:\n", "            return tf.not_equal(inputs, 0)\n", "        else:\n", "            return None\n", "\n", "    def get_config(self):\n", "        config = super().get_config()\n", "        config.update({\n", "            \"vocab_size\": self.vocab_size,\n", "            \"sequence_length\": self.sequence_length,\n", "            \"embed_dim\": self.embed_dim,\n", "        })\n", "        return config\n", "\n", "# Creating a Decoder\n",
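"# The decoder mirrors the encoder but adds a causal self-attention block, so\n", "# position i can only attend to target positions <= i, plus a cross-attention\n", "# block over the encoder output, where source and target sequences interact.\n",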
"class TransformerDecoder(layers.Layer):\n", " def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):\n", " super().__init__(**kwargs)\n", " self.embed_dim = embed_dim\n", " self.latent_dim = latent_dim\n", " self.num_heads = num_heads\n", " self.attention_1 = layers.MultiHeadAttention(\n", " num_heads = num_heads, key_dim = embed_dim\n", " )\n", " self.attention_2 = layers.MultiHeadAttention(\n", " num_heads = num_heads, key_dim = embed_dim\n", " )\n", " self.dense_proj = keras.Sequential(\n", " [\n", " layers.Dense(latent_dim, activation = \"relu\"),\n", " layers.Dense(embed_dim),\n", " ]\n", " )\n", " self.layernorm_1 = layers.LayerNormalization()\n", " self.layernorm_2 = layers.LayerNormalization()\n", " self.layernorm_3 = layers.LayerNormalization()\n", " self.supports_masking = True\n", " \n", " def call(self, inputs, encoder_outputs, mask=None):\n", " casual_mask = self.get_causal_attention_mask(inputs)\n", " if mask is not None:\n", " padding_mask = tf.cast(mask[:, None, :], dtype = tf.int32)\n", " padding_mask = tf.minimum(padding_mask, casual_mask)\n", " else:\n", " padding_mask = None\n", " \n", " attention_output_1 = self.attention_1(\n", " query = inputs,\n", " value = inputs,\n", " key = inputs,\n", " attention_mask = casual_mask,\n", " )\n", " out_1 = self.layernorm_1(inputs + attention_output_1)\n", " \n", " attention_output_2 = self.attention_2(\n", " query = out_1,\n", " value = encoder_outputs,\n", " key = encoder_outputs,\n", " attention_mask = padding_mask,\n", " )\n", " \n", " out_2 = self.layernorm_2(out_1 + attention_output_2)\n", " proj_output = self.dense_proj(out_2)\n", " \n", " return self.layernorm_3(out_2 + proj_output)\n", " \n", " def get_causal_attention_mask(self, inputs):\n", " input_shape = tf.shape(inputs)\n", " batch_size, sequence_length = input_shape[0], input_shape[1]\n", " i = tf.range(sequence_length)[:, None]\n", " j = tf.range(sequence_length)\n", " mask = tf.cast(i >= j, tf.int32)\n", " mask = tf.reshape(mask,(1, input_shape[1], input_shape[1]))\n", " mult = tf.concat(\n", " [\n", " tf.expand_dims(batch_size, -1),\n", " tf.convert_to_tensor([1, 1]),\n", " ],\n", " axis = 0,\n", " )\n", " return tf.tile(mask, mult)\n", " \n", " def get_config(self):\n", " config = super().get_config()\n", " config.update({\n", " \"embed_dim\": self.embed_dim,\n", " \"latent_dim\": self.latent_dim,\n", " \"num_heads\": self.num_heads,\n", " })\n", " return config\n" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "# define emmbedding dimensions, latent dimensions, and number of heads\n", "embed_dim = 256\n", "latent_dim = 2048\n", "num_heads = 8\n", "\n", "#Encoder\n", "encoder_inputs = keras.Input(shape = (None,), dtype = \"int64\", name = \"encoder_inputs\")\n", "\n", "x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs)\n", "\n", "encoder_outputs = TransformerEncoder(embed_dim, latent_dim, num_heads)(x)\n", "\n", "encoder = keras.Model(encoder_inputs, encoder_outputs, name = \"encoder\")\n", "\n", "#Decoder\n", "decoder_inputs = keras.Input(shape = (None,), dtype = \"int64\", name = \"decoder_inputs\")\n", "encoder_seq_inputs = keras.Input(shape = (None, embed_dim), name = \"encoder_seq_inputs\")\n", "\n", "x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs)\n", "\n", "x = TransformerDecoder(embed_dim, latent_dim, num_heads)(x, encoder_seq_inputs)\n", "\n", "x = layers.Dropout(0.5)(x)\n", "\n", "decoder_outputs = layers.Dense(vocab_size, activation = 
\"softmax\")(x)\n", "\n", "decoder = keras.Model([decoder_inputs, encoder_seq_inputs], decoder_outputs, name = \"decoder\")\n", "\n", "# Define the final model\n", "decoder_outputs = decoder([decoder_inputs, encoder_outputs])\n", "\n", "transformer = keras.Model(\n", " [encoder_inputs, decoder_inputs], decoder_outputs, name = \"transformer\"\n", ")\n" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Model: \"transformer\"\n", "__________________________________________________________________________________________________\n", " Layer (type) Output Shape Param # Connected to \n", "==================================================================================================\n", " encoder_inputs (InputLayer) [(None, None)] 0 [] \n", " \n", " positional_embedding (Position (None, None, 256) 3845120 ['encoder_inputs[0][0]'] \n", " alEmbedding) \n", " \n", " decoder_inputs (InputLayer) [(None, None)] 0 [] \n", " \n", " transformer_encoder (Transform (None, None, 256) 3155456 ['positional_embedding[0][0]'] \n", " erEncoder) \n", " \n", " decoder (Functional) (None, None, 15000) 12959640 ['decoder_inputs[0][0]', \n", " 'transformer_encoder[0][0]'] \n", " \n", "==================================================================================================\n", "Total params: 19,960,216\n", "Trainable params: 19,960,216\n", "Non-trainable params: 0\n", "__________________________________________________________________________________________________\n", "Epoch 1/20\n", "1302/1302 [==============================] - 132s 95ms/step - loss: 1.9854 - accuracy: 0.7200 - val_loss: 1.7019 - val_accuracy: 0.7395\n", "Epoch 2/20\n", "1302/1302 [==============================] - 123s 94ms/step - loss: 1.7172 - accuracy: 0.7499 - val_loss: 1.5479 - val_accuracy: 0.7610\n", "Epoch 3/20\n", "1302/1302 [==============================] - 123s 95ms/step - loss: 1.5636 - accuracy: 0.7712 - val_loss: 1.4142 - val_accuracy: 0.7838\n", "Epoch 4/20\n", "1302/1302 [==============================] - 124s 96ms/step - loss: 1.4229 - accuracy: 0.7906 - val_loss: 1.2981 - val_accuracy: 0.8027\n", "Epoch 5/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 1.3049 - accuracy: 0.8079 - val_loss: 1.2207 - val_accuracy: 0.8173\n", "Epoch 6/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 1.2307 - accuracy: 0.8232 - val_loss: 1.1703 - val_accuracy: 0.8294\n", "Epoch 7/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 1.1852 - accuracy: 0.8348 - val_loss: 1.1304 - val_accuracy: 0.8376\n", "Epoch 8/20\n", "1302/1302 [==============================] - 124s 95ms/step - loss: 1.1455 - accuracy: 0.8431 - val_loss: 1.1064 - val_accuracy: 0.8436\n", "Epoch 9/20\n", "1302/1302 [==============================] - 124s 95ms/step - loss: 1.1154 - accuracy: 0.8496 - val_loss: 1.0878 - val_accuracy: 0.8461\n", "Epoch 10/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 1.0901 - accuracy: 0.8545 - val_loss: 1.0737 - val_accuracy: 0.8498\n", "Epoch 11/20\n", "1302/1302 [==============================] - 124s 95ms/step - loss: 1.0690 - accuracy: 0.8583 - val_loss: 1.0697 - val_accuracy: 0.8472\n", "Epoch 12/20\n", "1302/1302 [==============================] - 124s 95ms/step - loss: 1.0495 - accuracy: 0.8616 - val_loss: 1.0458 - val_accuracy: 0.8543\n", "Epoch 13/20\n", "1302/1302 [==============================] - 124s 95ms/step - loss: 1.0332 - 
accuracy: 0.8648 - val_loss: 1.0387 - val_accuracy: 0.8548\n", "Epoch 14/20\n", "1302/1302 [==============================] - 123s 95ms/step - loss: 1.0180 - accuracy: 0.8673 - val_loss: 1.0458 - val_accuracy: 0.8550\n", "Epoch 15/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 1.0036 - accuracy: 0.8695 - val_loss: 1.0303 - val_accuracy: 0.8569\n", "Epoch 16/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 0.9891 - accuracy: 0.8720 - val_loss: 1.0184 - val_accuracy: 0.8586\n", "Epoch 17/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 0.9779 - accuracy: 0.8738 - val_loss: 1.0313 - val_accuracy: 0.8567\n", "Epoch 18/20\n", "1302/1302 [==============================] - 126s 96ms/step - loss: 0.9668 - accuracy: 0.8754 - val_loss: 1.0106 - val_accuracy: 0.8614\n", "Epoch 19/20\n", "1302/1302 [==============================] - 125s 96ms/step - loss: 0.9543 - accuracy: 0.8772 - val_loss: 1.0144 - val_accuracy: 0.8598\n", "Epoch 20/20\n", "1302/1302 [==============================] - 124s 95ms/step - loss: 0.9415 - accuracy: 0.8791 - val_loss: 1.0139 - val_accuracy: 0.8617\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "epochs = 20\n", "\n", "transformer.summary()\n", "\n", "transformer.compile(\n", " \"rmsprop\", loss = \"sparse_categorical_crossentropy\", metrics = [\"accuracy\"]\n", ")\n", "\n", "transformer.fit(train_ds, epochs = epochs, validation_data = val_ds)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:Found untraced functions such as embedding_layer_call_fn, embedding_layer_call_and_return_conditional_losses, embedding_1_layer_call_fn, embedding_1_layer_call_and_return_conditional_losses, multi_head_attention_layer_call_fn while saving (showing 5 of 60). 
These functions will not be directly callable after loading.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Assets written to: transformer_model\\assets\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:tensorflow:Assets written to: transformer_model\\assets\n" ] } ], "source": [ "transformer.save(\"transformer_model\")" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "input: a few passengers went on board the plane\n", "translated: [start] unos dos te [UNK] en los años [end]\n", "\n", "input: i think shes an honest woman\n", "translated: [start] creo que es una mujer [UNK] [end]\n", "\n", "input: im old enough to do that on my own\n", "translated: [start] soy lo suficientemente viejo para hacer por mi [end]\n", "\n", "input: youre too drunk to drive\n", "translated: [start] eres demasiado de conducir [end]\n", "\n", "input: id like to go to hawaii\n", "translated: [start] quisiera ir a china [end]\n", "\n" ] } ], "source": [ "spa_vocab = spa_vectorization.get_vocabulary()\n", "spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))\n", "max_decoded_sentence_length = sequence_length\n", "\n", "# Greedy decoding: feed the source sentence plus the tokens generated so far,\n", "# take the most probable next token at each step, and stop at [end].\n", "def decode_sentence(input_sentence):\n", "    tokenized_input_sentence = eng_vectorization([input_sentence])\n", "    decoded_sentence = \"[start]\"\n", "    for i in range(max_decoded_sentence_length):\n", "        tokenized_target_sentence = spa_vectorization([decoded_sentence])[:, :-1]\n", "        predictions = transformer([tokenized_input_sentence, tokenized_target_sentence])\n", "        sampled_token_index = tf.argmax(predictions[0, i, :]).numpy().item(0)\n", "        sampled_token = spa_index_lookup[sampled_token_index]\n", "        decoded_sentence += \" \" + sampled_token\n", "        if sampled_token == \"[end]\":\n", "            break\n", "    return decoded_sentence\n", "\n", "# Apply the same lowercasing and punctuation stripping the training data saw.\n", "test_eng_texts = [pair[0] for pair in test_pairs]\n", "for _ in range(5):\n", "    input_sentence = random.choice(test_eng_texts)\n", "    input_sentence = input_sentence.lower()\n", "    input_sentence = input_sentence.translate(str.maketrans('', '', strip_chars))\n", "    translated = decode_sentence(input_sentence)\n", "    print(f\"input: {input_sentence}\")\n", "    print(f\"translated: {translated}\")\n", "    print()" ] },
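{ "cell_type": "markdown", "metadata": {}, "source": [ "To reuse the model in a later session without retraining, the SavedModel directory written above can be loaded back. A minimal sketch (left unexecuted; it assumes the custom layer classes defined earlier are in scope, and `reloaded_transformer` is an illustrative name):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Sketch: reload the SavedModel written by transformer.save(\"transformer_model\").\n", "# Passing the custom classes via custom_objects restores them as regular Keras layers.\n", "reloaded_transformer = keras.models.load_model(\n", "    \"transformer_model\",\n", "    custom_objects={\n", "        \"TransformerEncoder\": TransformerEncoder,\n", "        \"TransformerDecoder\": TransformerDecoder,\n", "        \"PositionalEmbedding\": PositionalEmbedding,\n", "    },\n", ")\n", "\n", "# reloaded_transformer can then stand in for transformer inside decode_sentence." ] }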
], "metadata": { "kernelspec": { "display_name": "base", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.19" } }, "nbformat": 4, "nbformat_minor": 2 }