Expt No: 11 Neural Network Model
Date:
Aim: To write a program to demonstrate a simple Neural Network model.
Program
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras import layers, models, Input
# Load the MNIST dataset
train_data = pd.read_csv('mnist_train.csv')
test_data = pd.read_csv('mnist_test.csv')
# Extract features and labels
X_train = train_data.drop('label', axis=1).values
y_train = train_data['label'].values
X_test = test_data.drop('label', axis=1).values
y_test = test_data['label'].values
# Normalize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Convert labels to one-hot encoded vectors
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
# Define the neural network architecture
inputs = Input(shape=(784,))
x = layers.Dense(128, activation='relu')(inputs)
outputs = layers.Dense(10, activation='softmax')(x)
model = models.Model(inputs=inputs, outputs=outputs)
# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model and capture history
history = model.fit(X_train, y_train, epochs=10,
                    batch_size=128, validation_split=0.1)
# Plot training and validation loss
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
# Plot training and validation accuracy
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.show()
# Evaluate the model on the test set
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f'Test Accuracy: {test_accuracy:.4f}')
# Save the model
model.save('mnist_model.keras')
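The saved model can be reloaded later for inference without retraining. The sketch below is an illustrative addition (not part of the recorded program), assuming the file 'mnist_model.keras' written above and the scaled test matrix X_test are available; the predicted digit is the index of the largest softmax output.

import numpy as np
import tensorflow as tf

# Reload the trained model from the file saved above
loaded_model = tf.keras.models.load_model('mnist_model.keras')

# Predict the digit for the first scaled test sample (shape (1, 784))
probabilities = loaded_model.predict(X_test[:1])
predicted_digit = int(np.argmax(probabilities, axis=1)[0])
print('Predicted digit:', predicted_digit)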
Result: Thus the program to demonstrate a simple Neural Network model was written and executed.
Sample Output
Epoch 1/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 26s 10ms/step - accuracy: 0.8402 -
loss: 0.5427 - val_accuracy: 0.9618 - val_loss: 0.1429
Epoch 2/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.9613 -
loss: 0.1285 - val_accuracy: 0.9690 - val_loss: 0.1144
Epoch 3/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.9759 -
loss: 0.0826 - val_accuracy: 0.9710 - val_loss: 0.1069
Epoch 4/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9833 -
loss: 0.0571 - val_accuracy: 0.9745 - val_loss: 0.1028
Epoch 5/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9894 -
loss: 0.0397 - val_accuracy: 0.9750 - val_loss: 0.0975
Epoch 6/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.9919 -
loss: 0.0303 - val_accuracy: 0.9765 - val_loss: 0.1026
Epoch 7/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.9945 -
loss: 0.0233 - val_accuracy: 0.9762 - val_loss: 0.1061
Epoch 8/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.9961 -
loss: 0.0176 - val_accuracy: 0.9757 - val_loss: 0.1038
Epoch 9/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.9969 -
loss: 0.0136 - val_accuracy: 0.9758 - val_loss: 0.1071
Epoch 10/10
422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.9984 -
loss: 0.0097 - val_accuracy: 0.9767 - val_loss: 0.1134
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9711 -
loss: 0.1274
Test Accuracy: 0.9738