@@ -10,9 +10,9 @@
     "  <td align=\"center\"><a target=\"_blank\" href=\"http://introtodeeplearning.com\">\n",
     "      <img src=\"https://i.ibb.co/Jr88sn2/mit.png\" style=\"padding-bottom:5px;\" />\n",
     "      Visit MIT Deep Learning</a></td>\n",
-    "  <td align=\"center\"><a target=\"_blank\" href=\"https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab2/solutions/TF_Part1_MNIST_Solution.ipynb\">\n",
+    "  <td align=\"center\"><a target=\"_blank\" href=\"https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab2/TF_Part1_MNIST.ipynb\">\n",
     "      <img src=\"https://i.ibb.co/2P3SLwK/colab.png\" style=\"padding-bottom:5px;\" />Run in Google Colab</a></td>\n",
-    "  <td align=\"center\"><a target=\"_blank\" href=\"https://github.com/aamini/introtodeeplearning/blob/master/lab2/solutions/TF_Part1_MNIST_Solution.ipynb\">\n",
+    "  <td align=\"center\"><a target=\"_blank\" href=\"https://github.com/aamini/introtodeeplearning/blob/master/lab2/TF_Part1_MNIST.ipynb\">\n",
     "      <img src=\"https://i.ibb.co/xfJbPmL/github.png\" height=\"70px\" style=\"padding-bottom:5px;\" />View Source on GitHub</a></td>\n",
     "</table>\n",
     "\n",
@@ -216,12 +216,10 @@
     "      tf.keras.layers.Flatten(),\n",
     "\n",
     "      # '''TODO: Define the activation function for the first fully connected (Dense) layer.'''\n",
-    "      tf.keras.layers.Dense(128, activation=tf.nn.relu),\n",
-    "      # tf.keras.layers.Dense(128, activation= '''TODO'''),\n",
+    "      tf.keras.layers.Dense(128, activation= '''TODO'''),\n",
     "\n",
     "      # '''TODO: Define the second Dense layer to output the classification probabilities'''\n",
-    "      tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
-    "      # [TODO Dense layer to output classification probabilities]\n",
+    "      '''[TODO Dense layer to output classification probabilities]'''\n",
     "\n",
     "  ])\n",
     "  return fc_model\n",
@@ -348,8 +346,7 @@
   "outputs": [],
   "source": [
     "'''TODO: Use the evaluate method to test the model!'''\n",
-    "test_loss, test_acc = model.evaluate(test_images, test_labels) # TODO\n",
-    "# test_loss, test_acc = # TODO\n",
+    "test_loss, test_acc = # TODO\n",
     "\n",
     "print('Test accuracy:', test_acc)"
   ]
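The deleted line shows the intended completion of this TODO. A minimal sketch, assuming `model`, `test_images`, and `test_labels` are defined earlier in the notebook:

```python
# Evaluate on the held-out test set; evaluate() returns the loss followed by the metrics
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
```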
@@ -410,29 +407,24 @@
     "  cnn_model = tf.keras.Sequential([\n",
     "\n",
     "      # TODO: Define the first convolutional layer\n",
-    "      tf.keras.layers.Conv2D(filters=24, kernel_size=(3,3), activation=tf.nn.relu),\n",
-    "      # tf.keras.layers.Conv2D('''TODO''')\n",
+    "      tf.keras.layers.Conv2D('''TODO''')\n",
     "\n",
     "      # TODO: Define the first max pooling layer\n",
-    "      tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n",
-    "      # tf.keras.layers.MaxPool2D('''TODO''')\n",
+    "      tf.keras.layers.MaxPool2D('''TODO''')\n",
     "\n",
     "      # TODO: Define the second convolutional layer\n",
-    "      tf.keras.layers.Conv2D(filters=36, kernel_size=(3,3), activation=tf.nn.relu),\n",
-    "      # tf.keras.layers.Conv2D('''TODO''')\n",
+    "      tf.keras.layers.Conv2D('''TODO''')\n",
     "\n",
     "      # TODO: Define the second max pooling layer\n",
-    "      tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n",
-    "      # tf.keras.layers.MaxPool2D('''TODO''')\n",
+    "      tf.keras.layers.MaxPool2D('''TODO''')\n",
     "\n",
     "      tf.keras.layers.Flatten(),\n",
     "      tf.keras.layers.Dense(128, activation=tf.nn.relu),\n",
     "\n",
     "      # TODO: Define the last Dense layer to output the classification\n",
     "      # probabilities. Pay attention to the activation needed for a probability\n",
     "      # output\n",
-    "      tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
-    "      # [TODO Dense layer to output classification probabilities]\n",
+    "      '''[TODO Dense layer to output classification probabilities]'''\n",
     "  ])\n",
     "\n",
     "  return cnn_model\n",
@@ -467,10 +459,7 @@
     "comet_model_2 = comet_ml.Experiment()\n",
     "\n",
     "'''TODO: Define the compile operation with your optimizer and learning rate of choice'''\n",
-    "cnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),\n",
-    "                  loss='sparse_categorical_crossentropy',\n",
-    "                  metrics=['accuracy'])\n",
-    "# cnn_model.compile(optimizer='''TODO''', loss='''TODO''', metrics=['accuracy']) # TODO"
+    "cnn_model.compile(optimizer='''TODO''', loss='''TODO''', metrics=['accuracy']) # TODO"
   ]
 },
 {
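The removed compile call, restated as a runnable snippet (the optimizer, learning rate, and loss are from the deleted lines):

```python
# Adam at learning rate 1e-3 with sparse categorical cross-entropy, as in the removed solution
cnn_model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
```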
@@ -491,8 +480,7 @@
   "outputs": [],
   "source": [
     "'''TODO: Use model.fit to train the CNN model, with the same batch_size and number of epochs previously used.'''\n",
-    "cnn_model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)\n",
-    "# cnn_model.fit('''TODO''')\n",
+    "cnn_model.fit('''TODO''')\n",
     "# comet_model_2.end()"
   ]
 },
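Likewise for training, assuming `train_images`, `train_labels`, `BATCH_SIZE`, and `EPOCHS` are defined earlier in the notebook:

```python
# Train with the same batch size and epoch count used for the fully connected model
cnn_model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)
```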
@@ -514,8 +502,7 @@
   "outputs": [],
   "source": [
     "'''TODO: Use the evaluate method to test the model!'''\n",
-    "test_loss, test_acc = cnn_model.evaluate(test_images, test_labels)\n",
-    "# test_loss, test_acc = # TODO\n",
+    "test_loss, test_acc = # TODO\n",
     "\n",
     "print('Test accuracy:', test_acc)"
   ]
@@ -594,8 +581,7 @@
   "source": [
     "'''TODO: identify the digit with the highest confidence prediction for the first\n",
     "image in the test dataset. '''\n",
-    "prediction = np.argmax(predictions[0])\n",
-    "# prediction = # TODO\n",
+    "prediction = # TODO\n",
     "\n",
     "print(prediction)"
   ]
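The deleted line resolves this TODO with an argmax over the softmax outputs, assuming `predictions` holds the model's per-class probabilities for the test set:

```python
import numpy as np

# Index of the highest-probability class for the first test image
prediction = np.argmax(predictions[0])
print(prediction)
```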
@@ -725,22 +711,19 @@
     "  # GradientTape to record differentiation operations\n",
     "  with tf.GradientTape() as tape:\n",
     "    #'''TODO: feed the images into the model and obtain the predictions'''\n",
-    "    logits = cnn_model(images)\n",
-    "    # logits = # TODO\n",
+    "    logits = # TODO\n",
     "\n",
     "    #'''TODO: compute the categorical cross entropy loss\n",
-    "    loss_value = tf.keras.backend.sparse_categorical_crossentropy(labels, logits)\n",
+    "    loss_value = tf.keras.backend.sparse_categorical_crossentropy('''TODO''', '''TODO''') # TODO\n",
     "    comet_model_3.log_metric(\"loss\", loss_value.numpy().mean(), step=idx)\n",
-    "    # loss_value = tf.keras.backend.sparse_categorical_crossentropy('''TODO''', '''TODO''') # TODO\n",
     "\n",
     "  loss_history.append(loss_value.numpy().mean()) # append the loss to the loss_history record\n",
     "  plotter.plot(loss_history.get())\n",
     "\n",
     "  # Backpropagation\n",
     "  '''TODO: Use the tape to compute the gradient against all parameters in the CNN model.\n",
     "     Use cnn_model.trainable_variables to access these parameters.'''\n",
-    "  grads = tape.gradient(loss_value, cnn_model.trainable_variables)\n",
-    "  # grads = # TODO\n",
+    "  grads = # TODO\n",
     "  optimizer.apply_gradients(zip(grads, cnn_model.trainable_variables))\n",
     "\n",
     "  comet_model_3.log_figure(figure=plt)\n",
@@ -786,4 +769,4 @@
   },
   "nbformat": 4,
   "nbformat_minor": 0
-}
+}