import numpy as np
import matplotlib.pyplot as plt
import h5py
from PIL import Image
%matplotlib inline
Load training data
np.random.seed(42)
train_dataset = h5py.File('/cxldata/datasets/project/cat-non-cat/train_catvnoncat.h5', "r")
train_images = np.array(train_dataset["train_set_x"][:])  # train set features
train_labels = np.array(train_dataset["train_set_y"][:])  # train set labels
Load test data
np.random.seed(42)
test_dataset = h5py.File('/cxldata/datasets/project/cat-non-cat/test_catvnoncat.h5', "r")
test_images = np.array(test_dataset["test_set_x"][:])  # test set features
test_labels = np.array(test_dataset["test_set_y"][:])  # test set labels
Check all the classes
class_names = np.array(test_dataset["list_classes"][:])
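h5py returns these names as byte strings; a short sketch (assuming the dataset stores b'non-cat' and b'cat') decodes them for display:
# class_names holds byte strings such as b'non-cat'; decode for readable labels.
class_names_str = [name.decode('utf-8') for name in class_names]
print(class_names_str)  # expected: ['non-cat', 'cat']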
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
from tensorflow.keras.models import load_model
import os
print(tf.__version__)
print(tf.keras.__version__)
1.14.0
2.2.4-tf
train_images.shape
(209, 64, 64, 3)
train_labels.shape
(209,)
test_images.shape
(50, 64, 64, 3)
test_labels.shape
(50,)
plt.figure()
plt.imshow(np.squeeze(train_images[200]))
<matplotlib.image.AxesImage at 0x7fe670fd98d0>
train_labels[200]
1
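Label 1 should be 'cat'; mapping it back through class_names confirms this (assuming the classes are ordered [b'non-cat', b'cat'] as loaded above):
# Decode the class name of the 201st training image.
print(class_names[train_labels[200]].decode('utf-8'))  # expected: cat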
Let us plot some training images to see how they look
plt.figure(figsize=(10,10))
for i in range(15):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
# The images already have shape (N, 64, 64, 3); the reshape is a no-op kept for clarity.
train_images = train_images.reshape((209, 64, 64, 3))
test_images = test_images.reshape((50, 64, 64, 3))
# Scale pixel values from [0, 255] to [0, 1].
train_images_norm = train_images / 255.0
test_images_norm = test_images / 255.0
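A quick sanity check on the scaling; the exact values depend on the data, but everything should now lie in [0, 1]:
# Pixels were 0-255 integers; after dividing by 255.0 they are floats in [0, 1].
print(train_images_norm.min(), train_images_norm.max())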
conv1 = layers.Conv2D(32, (3,3), activation='relu', input_shape=(64,64,3))
WARNING: Logging before flag parsing goes to stderr.
W0723 07:37:33.122091 140629531981632 deprecation.py:506] From /usr/local/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
conv2 = layers.Conv2D(64, (3,3), activation='relu')
conv3 = layers.Conv2D(128, (3,3), activation='relu')
# Creating the first max-pooling layer
max_pool_1 = layers.MaxPooling2D((2,2))
# Creating the second max-pooling layer
max_pool_2 = layers.MaxPooling2D((2,2))
# Creating the third max-pooling layer
max_pool_3 = layers.MaxPooling2D((2,2))
flat_layer = layers.Flatten()
fc = layers.Dense(128, activation='relu')
output = layers.Dense(10, activation='softmax')
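A side note: cat vs non-cat is binary, so 2 softmax units (or one sigmoid unit) would suffice; the 10-unit head still trains because the sparse integer labels only ever hit classes 0 and 1. A hypothetical two-class head (the name binary_output is ours, not used below):
# Hypothetical two-class head for this binary task; not added to the model.
binary_output = layers.Dense(2, activation='softmax')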
We assemble the network with the Keras Sequential API
model = models.Sequential()
model.add(conv1)
model.add(conv2)
model.add(conv3)
model.add(max_pool_1)
model.add(flat_layer)
model.add(fc)
model.add(output)
Let us see which layers our model has
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 62, 62, 32)        896
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 60, 60, 64)        18496
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 58, 58, 128)       73856
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 29, 29, 128)       0
_________________________________________________________________
flatten (Flatten)            (None, 107648)            0
_________________________________________________________________
dense (Dense)                (None, 128)               13779072
_________________________________________________________________
dense_1 (Dense)              (None, 10)                1290
=================================================================
Total params: 13,873,610
Trainable params: 13,873,610
Non-trainable params: 0
_________________________________________________________________
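As a check on the summary, each count follows from kernel_height * kernel_width * in_channels * filters + filters for the convolutions, and inputs * units + units for the dense layers:
# Recompute the parameter counts by hand.
print(3*3*3*32 + 32)       # conv2d:   896
print(3*3*32*64 + 64)      # conv2d_1: 18496
print(3*3*64*128 + 128)    # conv2d_2: 73856
print(107648*128 + 128)    # dense:    13779072
print(128*10 + 10)         # dense_1:  1290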
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
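sparse_categorical_crossentropy consumes the integer labels directly; the equivalent with one-hot labels would look like this (a sketch, not run here):
# Hypothetical one-hot alternative to the sparse loss used above:
# train_labels_onehot = keras.utils.to_categorical(train_labels, num_classes=10)
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])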
model.fit(train_images_norm, train_labels, epochs=20, batch_size=512, shuffle=True, validation_split=0.1)
Train on 188 samples, validate on 21 samples
Epoch 1/20
188/188 [==============================] - 3s 14ms/sample - loss: 2.3129 - acc: 0.0106 - val_loss: 2.0600 - val_acc: 0.8095
Epoch 2/20
188/188 [==============================] - 2s 12ms/sample - loss: 3.6205 - acc: 0.6383 - val_loss: 3.1058 - val_acc: 0.1905
Epoch 3/20
188/188 [==============================] - 2s 11ms/sample - loss: 2.4968 - acc: 0.3617 - val_loss: 1.0421 - val_acc: 0.1905
Epoch 4/20
188/188 [==============================] - 2s 13ms/sample - loss: 0.8519 - acc: 0.3617 - val_loss: 0.6769 - val_acc: 0.8095
Epoch 5/20
188/188 [==============================] - 3s 13ms/sample - loss: 0.7885 - acc: 0.6383 - val_loss: 0.7653 - val_acc: 0.8095
Epoch 6/20
188/188 [==============================] - 4s 20ms/sample - loss: 0.9537 - acc: 0.6383 - val_loss: 0.8064 - val_acc: 0.8095
Epoch 7/20
188/188 [==============================] - 4s 22ms/sample - loss: 0.9220 - acc: 0.6383 - val_loss: 0.8186 - val_acc: 0.8095
Epoch 8/20
188/188 [==============================] - 5s 26ms/sample - loss: 0.9037 - acc: 0.6383 - val_loss: 0.7252 - val_acc: 0.8095
Epoch 9/20
188/188 [==============================] - 5s 28ms/sample - loss: 0.8096 - acc: 0.6383 - val_loss: 0.6344 - val_acc: 0.8095
Epoch 10/20
188/188 [==============================] - 5s 27ms/sample - loss: 0.6666 - acc: 0.6383 - val_loss: 0.5807 - val_acc: 0.8095
Epoch 11/20
188/188 [==============================] - 4s 24ms/sample - loss: 0.5003 - acc: 0.6915 - val_loss: 0.7777 - val_acc: 0.4286
Epoch 12/20
188/188 [==============================] - 5s 25ms/sample - loss: 0.5723 - acc: 0.6862 - val_loss: 0.5421 - val_acc: 0.8095
Epoch 13/20
188/188 [==============================] - 4s 21ms/sample - loss: 0.4142 - acc: 0.6968 - val_loss: 0.6143 - val_acc: 0.8095
Epoch 14/20
188/188 [==============================] - 4s 21ms/sample - loss: 0.4483 - acc: 0.6809 - val_loss: 0.6063 - val_acc: 0.6190
Epoch 15/20
188/188 [==============================] - 4s 23ms/sample - loss: 0.3711 - acc: 0.8404 - val_loss: 0.6636 - val_acc: 0.6190
Epoch 16/20
188/188 [==============================] - 4s 22ms/sample - loss: 0.3992 - acc: 0.8245 - val_loss: 0.6019 - val_acc: 0.7143
Epoch 17/20
188/188 [==============================] - 4s 21ms/sample - loss: 0.3078 - acc: 0.8830 - val_loss: 0.7150 - val_acc: 0.7619
Epoch 18/20
188/188 [==============================] - 4s 21ms/sample - loss: 0.3630 - acc: 0.7872 - val_loss: 0.6080 - val_acc: 0.7143
Epoch 19/20
188/188 [==============================] - 4s 21ms/sample - loss: 0.2626 - acc: 0.9202 - val_loss: 0.6109 - val_acc: 0.6667
Epoch 20/20
188/188 [==============================] - 4s 23ms/sample - loss: 0.2875 - acc: 0.8617 - val_loss: 0.6015 - val_acc: 0.7143
<tensorflow.python.keras.callbacks.History at 0x7fe66fe81c88>
Note that we evaluate on the normalized test data.
test_loss, test_accuracy = model.evaluate(test_images_norm, test_labels)
50/50 [==============================] - 0s 6ms/sample - loss: 0.4417 - acc: 0.8600
print(test_accuracy)
0.86
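To inspect individual predictions, we can run the model on a few normalized test images and take the argmax of the softmax output (a sketch using the arrays defined above):
# Predict the first five test images and compare against the ground truth.
probs = model.predict(test_images_norm[:5])
preds = np.argmax(probs, axis=1)
print('predicted:', preds)
print('actual:   ', test_labels[:5])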
Creating the first dropout layer (for the first convolutional layer 'conv1')
drop_1 = keras.layers.Dropout(0.5)
Creating the second dropout layer (for the second convolutional layer 'conv2')
drop_2 = keras.layers.Dropout(0.5)
Creating the third dropout layer (for the third convolutional layer 'conv3')
drop_3 = keras.layers.Dropout(0.5)
Let us create a new neural network by applying the dropout layers created above to their respective convolutional layers.
Please note that each dropout layer is applied after the corresponding max-pooling layer.
new_model = models.Sequential()
new_model.add(conv1)
No pooling or dropout layer is added after the first convolutional layer 'conv1'
new_model.add(conv2)
new_model.add(max_pool_2)
new_model.add(drop_2)
new_model.add(conv3)
new_model.add(max_pool_3)
new_model.add(drop_3)
new_model.add(flat_layer)
new_model.add(fc)
new_model.add(output)
ValueError                                Traceback (most recent call last)
<ipython-input-...> in <module>
     14 new_model.add(drop_3)
     15 new_model.add(flat_layer)
---> 16 new_model.add(fc)
     17 new_model.add(output)

/usr/local/anaconda/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
    455     self._self_setattr_tracking = False  # pylint: disable=protected-access
    456     try:
--> 457       result = method(self, *args, **kwargs)
    458     finally:
    459       self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

/usr/local/anaconda/lib/python3.6/site-packages/tensorflow/python/keras/engine/sequential.py in add(self, layer)
    190         # If the model is being built continuously on top of an input layer:
    191         # refresh its output.
--> 192         output_tensor = layer(self.outputs[0])
    193         if len(nest.flatten(output_tensor)) != 1:
    194           raise TypeError('All layers in a Sequential model '

/usr/local/anaconda/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
    584           # the corresponding TF subgraph inside backend.get_graph()
    585           input_spec.assert_input_compatibility(self.input_spec, inputs,
--> 586                                                 self.name)
    587           graph = backend.get_graph()
    588           with graph.as_default(), backend.name_scope(self._name_scope()):

/usr/local/anaconda/lib/python3.6/site-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
    157                            ' incompatible with the layer: expected axis ' + str(axis) +
    158                            ' of input shape to have value ' + str(value) +
--> 159                            ' but received input with shape ' + str(shape))
    160     # Check shape.
    161     if spec.shape is not None:

ValueError: Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 107648 but received input with shape [None, 25088]
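This ValueError is expected: the layer objects were already built inside the first model, so fc is locked to an input dimension of 107648, while the new topology (with its two extra pooling layers) flattens to only 25088 features. A minimal sketch of the fix is to build the dropout model from fresh layer instances rather than reusing built ones (the name dropout_model is ours):
# Built Keras layers remember their input shapes, so they cannot be reused
# in a model with a different topology. Create fresh instances instead.
dropout_model = models.Sequential([
    layers.Conv2D(32, (3,3), activation='relu', input_shape=(64,64,3)),
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Dropout(0.5),
    layers.Conv2D(128, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
    layers.Dropout(0.5),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
dropout_model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])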