From f2ab6166d452756c44a2b494ab13659eeb654b7d Mon Sep 17 00:00:00 2001 From: Raffi Khatchadourian Date: Mon, 5 Feb 2024 16:40:07 -0500 Subject: [PATCH 1/3] Refactor to hybrid functions. --- .../notebooks/2_BasicModels/logistic_regression.ipynb | 4 ++++ .../notebooks/3_NeuralNetworks/autoencoder.ipynb | 4 ++++ .../notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb | 4 ++++ .../3_NeuralNetworks/convolutional_network.ipynb | 4 ++++ tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb | 2 ++ .../notebooks/3_NeuralNetworks/dynamic_rnn.ipynb | 4 ++++ .../notebooks/3_NeuralNetworks/neural_network.ipynb | 4 ++++ .../notebooks/3_NeuralNetworks/recurrent_network.ipynb | 4 ++++ tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb | 5 +++++ tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb | 8 ++++++++ tensorflow_v2/notebooks/4_Utils/tensorboard.ipynb | 3 +++ 11 files changed, 46 insertions(+) diff --git a/tensorflow_v2/notebooks/2_BasicModels/logistic_regression.ipynb b/tensorflow_v2/notebooks/2_BasicModels/logistic_regression.ipynb index b9b1ccc4..32094cd7 100644 --- a/tensorflow_v2/notebooks/2_BasicModels/logistic_regression.ipynb +++ b/tensorflow_v2/notebooks/2_BasicModels/logistic_regression.ipynb @@ -98,11 +98,13 @@ "b = tf.Variable(tf.zeros([num_classes]), name=\"bias\")\n", "\n", "# Logistic regression (Wx + b).\n", + "@tf.function\n", "def logistic_regression(x):\n", " # Apply softmax to normalize the logits to a probability distribution.\n", " return tf.nn.softmax(tf.matmul(x, W) + b)\n", "\n", "# Cross-Entropy loss function.\n", + "@tf.function\n", "def cross_entropy(y_pred, y_true):\n", " # Encode label to a one hot vector.\n", " y_true = tf.one_hot(y_true, depth=num_classes)\n", @@ -112,6 +114,7 @@ " return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred),1))\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -128,6 +131,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/autoencoder.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/autoencoder.ipynb index b7c22279..22c0fa78 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/autoencoder.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/autoencoder.ipynb @@ -129,6 +129,7 @@ "outputs": [], "source": [ "# Building the encoder.\n", + "@tf.function\n", "def encoder(x):\n", " # Encoder Hidden layer with sigmoid activation.\n", " layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),\n", @@ -140,6 +141,7 @@ "\n", "\n", "# Building the decoder.\n", + "@tf.function\n", "def decoder(x):\n", " # Decoder Hidden layer with sigmoid activation.\n", " layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),\n", @@ -157,6 +159,7 @@ "outputs": [], "source": [ "# Mean square loss between original images and reconstructed ones.\n", + "@tf.function\n", "def mean_square(reconstructed, original):\n", " return tf.reduce_mean(tf.pow(original - reconstructed, 2))\n", "\n", @@ -171,6 +174,7 @@ "outputs": [], "source": [ "# Optimization process. 
\n", + "@tf.function\n", "def run_optimization(x):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb index 19cbd07b..51de09ea 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb @@ -119,6 +119,7 @@ " self.out = layers.Dense(num_classes)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " x = self.bi_lstm(x)\n", " x = self.out(x)\n", @@ -140,6 +141,7 @@ "source": [ "# Cross-Entropy Loss.\n", "# Note that this will apply 'softmax' to the logits.\n", + "@tf.function\n", "def cross_entropy_loss(x, y):\n", " # Convert labels to int 64 for tf cross-entropy function.\n", " y = tf.cast(y, tf.int64)\n", @@ -149,6 +151,7 @@ " return tf.reduce_mean(loss)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -165,6 +168,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/convolutional_network.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/convolutional_network.ipynb index 0bf52a43..a200cef0 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/convolutional_network.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/convolutional_network.ipynb @@ -126,6 +126,7 @@ " self.out = layers.Dense(num_classes)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " x = tf.reshape(x, [-1, 28, 28, 1])\n", " x = self.conv1(x)\n", @@ -154,6 +155,7 @@ "source": [ "# Cross-Entropy Loss.\n", "# Note that this will apply 'softmax' to the logits.\n", + "@tf.function\n", "def cross_entropy_loss(x, y):\n", " # Convert labels to int 64 for tf cross-entropy function.\n", " y = tf.cast(y, tf.int64)\n", @@ -163,6 +165,7 @@ " return tf.reduce_mean(loss)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -179,6 +182,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb index 763b210f..4545e0ab 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb @@ -147,6 +147,7 @@ " self.fc2 = layers.Dense(2)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " x = tf.reshape(x, [-1, 28, 28, 1])\n", " x = self.conv1(x)\n", @@ -197,6 +198,7 @@ "outputs": [], "source": [ "# Optimization process. 
Inputs: real image and noise.\n", + "@tf.function\n", "def run_optimization(real_images):\n", " \n", " # Rescale to [-1, 1], the input range of the discriminator\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb index a3a6fa08..4f574c4a 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb @@ -141,6 +141,7 @@ " self.out = layers.Dense(num_classes)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " # A RNN Layer expects a 3-dim input (batch_size, seq_len, num_features).\n", " x = tf.reshape(x, shape=[-1, seq_max_len, 1])\n", @@ -168,6 +169,7 @@ "source": [ "# Cross-Entropy Loss.\n", "# Note that this will apply 'softmax' to the logits.\n", + "@tf.function\n", "def cross_entropy_loss(x, y):\n", " # Convert labels to int 64 for tf cross-entropy function.\n", " y = tf.cast(y, tf.int64)\n", @@ -177,6 +179,7 @@ " return tf.reduce_mean(loss)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -193,6 +196,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb index 9ecf0f2c..22b21695 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb @@ -114,6 +114,7 @@ " self.out = layers.Dense(num_classes)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " x = self.fc1(x)\n", " x = self.fc2(x)\n", @@ -136,6 +137,7 @@ "source": [ "# Cross-Entropy Loss.\n", "# Note that this will apply 'softmax' to the logits.\n", + "@tf.function\n", "def cross_entropy_loss(x, y):\n", " # Convert labels to int 64 for tf cross-entropy function.\n", " y = tf.cast(y, tf.int64)\n", @@ -145,6 +147,7 @@ " return tf.reduce_mean(loss)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -161,6 +164,7 @@ "outputs": [], "source": [ "# Optimization process. 
\n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/recurrent_network.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/recurrent_network.ipynb index fe587f59..0e6bfb07 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/recurrent_network.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/recurrent_network.ipynb @@ -115,6 +115,7 @@ " self.out = layers.Dense(num_classes)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " # LSTM layer.\n", " x = self.lstm_layer(x)\n", @@ -138,6 +139,7 @@ "source": [ "# Cross-Entropy Loss.\n", "# Note that this will apply 'softmax' to the logits.\n", + "@tf.function\n", "def cross_entropy_loss(x, y):\n", " # Convert labels to int 64 for tf cross-entropy function.\n", " y = tf.cast(y, tf.int64)\n", @@ -147,6 +149,7 @@ " return tf.reduce_mean(loss)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -163,6 +166,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb b/tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb index 760a1c9c..622f662b 100644 --- a/tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb +++ b/tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb @@ -108,6 +108,7 @@ " # Make sure to call the `build` method at the end\n", " super(CustomLayer1, self).build(input_shape)\n", "\n", + " @tf.function\n", " def call(self, inputs):\n", " x = tf.matmul(inputs, self.weight)\n", " x = x + self.bias\n", @@ -188,6 +189,7 @@ " self.out = layers.Dense(num_classes, activation=tf.nn.softmax)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def __call__(self, x, is_training=False):\n", " x = self.layer1(x)\n", " x = tf.nn.relu(x)\n", @@ -209,12 +211,14 @@ "outputs": [], "source": [ "# Cross-Entropy loss function.\n", + "@tf.function\n", "def cross_entropy(y_pred, y_true):\n", " y_true = tf.cast(y_true, tf.int64)\n", " crossentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n", " return tf.reduce_mean(crossentropy)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -231,6 +235,7 @@ "outputs": [], "source": [ "# Optimization process. 
\n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb b/tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb index 6235bbfe..7db0ef7b 100644 --- a/tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb +++ b/tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb @@ -95,11 +95,13 @@ "b = tf.Variable(tf.zeros([num_classes]), name=\"bias\")\n", "\n", "# Logistic regression (Wx + b).\n", + "@tf.function\n", "def logistic_regression(x):\n", " # Apply softmax to normalize the logits to a probability distribution.\n", " return tf.nn.softmax(tf.matmul(x, W) + b)\n", "\n", "# Cross-Entropy loss function.\n", + "@tf.function\n", "def cross_entropy(y_pred, y_true):\n", " # Encode label to a one hot vector.\n", " y_true = tf.one_hot(y_true, depth=num_classes)\n", @@ -109,6 +111,7 @@ " return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -125,6 +128,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", @@ -380,6 +384,7 @@ " self.out = layers.Dense(num_classes, activation=tf.nn.softmax)\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def __call__(self, x, is_training=False):\n", " x = self.fc1(x)\n", " x = self.out(x)\n", @@ -400,12 +405,14 @@ "outputs": [], "source": [ "# Cross-Entropy loss function.\n", + "@tf.function\n", "def cross_entropy(y_pred, y_true):\n", " y_true = tf.cast(y_true, tf.int64)\n", " crossentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n", " return tf.reduce_mean(crossentropy)\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", @@ -422,6 +429,7 @@ "outputs": [], "source": [ "# Optimization process. \n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", diff --git a/tensorflow_v2/notebooks/4_Utils/tensorboard.ipynb b/tensorflow_v2/notebooks/4_Utils/tensorboard.ipynb index b552d0e2..d71ed6fc 100644 --- a/tensorflow_v2/notebooks/4_Utils/tensorboard.ipynb +++ b/tensorflow_v2/notebooks/4_Utils/tensorboard.ipynb @@ -136,6 +136,7 @@ "outputs": [], "source": [ "# Cross-Entropy loss function.\n", + "@tf.function\n", "def cross_entropy(y_pred, y_true):\n", " with tf.name_scope('CrossEntropyLoss'):\n", " # Encode label to a one hot vector.\n", @@ -146,6 +147,7 @@ " return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))\n", "\n", "# Accuracy metric.\n", + "@tf.function\n", "def accuracy(y_pred, y_true):\n", " with tf.name_scope('Accuracy'):\n", " # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n", @@ -164,6 +166,7 @@ "outputs": [], "source": [ "# Optimization process. 
\n", + "@tf.function\n", "def run_optimization(x, y):\n", " # Wrap computation inside a GradientTape for automatic differentiation.\n", " with tf.GradientTape() as g:\n", From 9a06820ce5200ea85ba5f2eda7472ab18280b86d Mon Sep 17 00:00:00 2001 From: Raffi Khatchadourian Date: Fri, 24 May 2024 15:29:12 -0400 Subject: [PATCH 2/3] Two new refactorings. --- tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb b/tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb index 04b5b051..9c79938a 100644 --- a/tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb +++ b/tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb @@ -186,6 +186,7 @@ " x_embed = tf.nn.embedding_lookup(embedding, x)\n", " return x_embed\n", "\n", + "@tf.function\n", "def nce_loss(x_embed, y):\n", " with tf.device('/cpu:0'):\n", " # Compute the average NCE loss for the batch.\n", @@ -200,6 +201,7 @@ " return loss\n", "\n", "# Evaluation.\n", + "@tf.function.\n", "def evaluate(x_embed):\n", " with tf.device('/cpu:0'):\n", " # Compute the cosine similarity between input data embedding and every embedding vectors\n", From 035193a9824d375360d0bf4456d46f4c8510c06c Mon Sep 17 00:00:00 2001 From: Raffi Khatchadourian Date: Fri, 26 Jul 2024 15:46:04 -0400 Subject: [PATCH 3/3] Add new refactoring. --- tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb b/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb index 4545e0ab..635a5516 100644 --- a/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb +++ b/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb @@ -113,6 +113,7 @@ " self.conv2tr2 = layers.Conv2DTranspose(1, 5, strides=2, padding='SAME')\n", "\n", " # Set forward pass.\n", + " @tf.function\n", " def call(self, x, is_training=False):\n", " x = self.fc1(x)\n", " x = self.bn1(x, training=is_training)\n",