Expt No: 12 Deep Learning Neural Network Model
Date:
Aim: To write a program to demonstrate a deep learning neural network model for image classification using the Fruits 360 dataset.
Program
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Input
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Set random seed for reproducibility
np.random.seed(42)
tf.random.set_seed(42)
# Path to the Fruits 360 dataset
train_dir = 'Fruits360/Training'
test_dir = 'Fruits360/Test'
# Define image size and batch size
img_width, img_height = 100, 100
batch_size = 32
# Data augmentation and normalization
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
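# Optional check (not in the original listing): display the class-name-to-index
# mapping inferred from the directory structure; predict_fruit() below relies
# on this same mapping when converting a predicted index back to a fruit name.
print("Class indices:", train_generator.class_indices)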
# Build CNN model
model = Sequential()
model.add(Input(shape=(img_width, img_height, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(train_generator.num_classes, activation='softmax'))
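# Optional step (added here for illustration): print a layer-by-layer summary
# of the network so the output shapes and parameter counts can be verified.
model.summary()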
# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=10,
    validation_data=test_generator,
    validation_steps=test_generator.samples // batch_size)
# Save the trained model
model.save('fruits_cnn_model.keras')
# Evaluate the model on the testing data
test_loss, test_accuracy = model.evaluate(test_generator)
print("Test Loss:", test_loss)
print("Test Accuracy:", test_accuracy)
# Function to load the trained model
def load_trained_model(model_path='fruits_cnn_model.keras'):
    global model
    model = load_model(model_path)
# Load the model
load_trained_model()
# Function to predict fruit from an image
def predict_fruit(image_path):
    # Load and preprocess the image
    img = image.load_img(image_path, target_size=(img_width, img_height))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0) / 255.
    # Predict the class of the fruit
    prediction = model.predict(img_array)
    predicted_class = np.argmax(prediction, axis=1)
    # Map the predicted index back to its class label
    class_labels = train_generator.class_indices
    reverse_class_labels = {v: k for k, v in class_labels.items()}
    predicted_fruit = reverse_class_labels[predicted_class[0]]
    return predicted_fruit
# Example usage
image_path = 'Fruits/L1.jpeg'
predicted_fruit = predict_fruit(image_path)
# Display the image
img = mpimg.imread(image_path)
plt.imshow(img)
plt.axis('off')
plt.show()
# Print the identified fruit name
print("Identified fruit:", predicted_fruit)
Result: Thus, the program to demonstrate a deep learning neural network model was written and executed successfully.
Sample Output
Found 981 images belonging to 3 classes.
Found 332 images belonging to 3 classes.
Epoch 1/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 32s 866ms/step - accuracy: 0.7610 - loss: 0.5012 - val_accuracy: 1.0000 - val_loss: 2.0452e-07
Epoch 2/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 8ms/step - accuracy: 1.0000 - loss: 2.3431e-05 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 3/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 441ms/step - accuracy: 1.0000 - loss: 7.7499e-06 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 4/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 6ms/step - accuracy: 1.0000 - loss: 2.9802e-08 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 5/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 436ms/step - accuracy: 1.0000 - loss: 3.5505e-07 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 6/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 7ms/step - accuracy: 1.0000 - loss: 1.1176e-08 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 7/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 438ms/step - accuracy: 1.0000 - loss: 1.1372e-06 - val_accuracy: 1.0000 - val_loss: 1.4901e-09
Epoch 8/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - accuracy: 1.0000 - loss: 1.8104e-06 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 9/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 436ms/step - accuracy: 1.0000 - loss: 6.8103e-06 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
Epoch 10/10
30/30 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 1.0000 - loss: 5.6766e-09 - val_accuracy: 1.0000 - val_loss: 0.0000e+00
11/11 ━━━━━━━━━━━━━━━━━━━━ 1s 112ms/step - accuracy: 1.0000 - loss: 0.0000e+00
Test Loss: 0.0
Test Accuracy: 1.0
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 219ms/step