Deep Learning Neural Network Model

 

Expt No: 12                                         Deep Learning Neural Network Model

Date:

 

Aim: To write a program to demonstrate a deep learning neural network model.

 

Program

import numpy as np

import tensorflow as tf

from tensorflow.keras.preprocessing.image import ImageDataGenerator

from tensorflow.keras.models import Sequential, load_model

from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Input

from tensorflow.keras.preprocessing import image

import matplotlib.pyplot as plt

import matplotlib.image as mpimg

 

# Set random seed for reproducibility

np.random.seed(42)

tf.random.set_seed(42)

 

# Path to the Fruits 360 dataset

train_dir = 'Fruits360/Training'

test_dir = 'Fruits360/Test'

 

# Define image size and batch size

img_width, img_height = 100, 100

batch_size = 32

 

# Data augmentation and normalization

train_datagen = ImageDataGenerator(

    rescale=1./255,

    shear_range=0.2,

    zoom_range=0.2,

    horizontal_flip=True)

 

test_datagen = ImageDataGenerator(rescale=1./255)

 

train_generator = train_datagen.flow_from_directory(

    train_dir,

    target_size=(img_width, img_height),

    batch_size=batch_size,

    class_mode='categorical')

 

test_generator = test_datagen.flow_from_directory(

    test_dir,

    target_size=(img_width, img_height),

    batch_size=batch_size,

    class_mode='categorical')

 

# Build CNN model

model = Sequential()

 

model.add(Input(shape=(img_width, img_height, 3)))

 

model.add(Conv2D(32, (3, 3), activation='relu'))

model.add(MaxPooling2D((2, 2)))

 

model.add(Conv2D(64, (3, 3), activation='relu'))

model.add(MaxPooling2D((2, 2)))

 

model.add(Conv2D(128, (3, 3), activation='relu'))

model.add(MaxPooling2D((2, 2)))

 

model.add(Flatten())

 

model.add(Dense(128, activation='relu'))

model.add(Dense(train_generator.num_classes, activation='softmax'))

 

# Compile the model

model.compile(optimizer='adam',

              loss='categorical_crossentropy',

              metrics=['accuracy'])

 

# Train the model

model.fit(

    train_generator,

    steps_per_epoch=train_generator.samples // batch_size,

    epochs=10,

    validation_data=test_generator,

    validation_steps=test_generator.samples // batch_size)

 

# Save the trained model

model.save('fruits_cnn_model.keras')

 

# Evaluate the model on the testing data

test_loss, test_accuracy = model.evaluate(test_generator)

print("Test Loss:", test_loss)

print("Test Accuracy:", test_accuracy)

 

# Function to load the model

def load_trained_model(model_path='fruits_cnn_model.keras'):

    global model

    model = load_model(model_path)

 

# Load the model

load_trained_model()

 

# Function to predict fruit from an image

def predict_fruit(image_path):

    # Load and preprocess the image

    img = image.load_img(image_path, target_size=(img_width, img_height))

    img_array = image.img_to_array(img)

    img_array = np.expand_dims(img_array, axis=0) / 255.

 

    # Predict the class of the fruit

    prediction = model.predict(img_array)

    predicted_class = np.argmax(prediction, axis=1)

 

    # Get the class label

    class_labels = train_generator.class_indices

    reverse_class_labels = {v: k for k, v in class_labels.items()}

    predicted_fruit = reverse_class_labels[predicted_class[0]]

 

    return predicted_fruit

 

# Example usage

image_path = 'Fruits/L1.jpeg'

predicted_fruit = predict_fruit(image_path)

 

# Display the image

img = mpimg.imread(image_path)

plt.imshow(img)

plt.axis('off')

plt.show()

 

# Print the identified fruit name

print("Identified fruit:", predicted_fruit)

 

Result: Thus the program to demonstrate a deep learning neural network model was written and executed.

 

Sample Output


Found 981 images belonging to 3 classes.

Found 332 images belonging to 3 classes.

Epoch 1/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 32s 866ms/step - accuracy: 0.7610 - loss: 0.5012 - val_accuracy: 1.0000 - val_loss: 2.0452e-07

Epoch 2/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 8ms/step - accuracy: 1.0000 - loss: 2.3431e-05 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 3/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 441ms/step - accuracy: 1.0000 - loss: 7.7499e-06 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 4/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 6ms/step - accuracy: 1.0000 - loss: 2.9802e-08 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 5/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 436ms/step - accuracy: 1.0000 - loss: 3.5505e-07 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 6/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 7ms/step - accuracy: 1.0000 - loss: 1.1176e-08 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 7/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 438ms/step - accuracy: 1.0000 - loss: 1.1372e-06 - val_accuracy: 1.0000 - val_loss: 1.4901e-09

Epoch 8/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - accuracy: 1.0000 - loss: 1.8104e-06 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 9/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 15s 436ms/step - accuracy: 1.0000 - loss: 6.8103e-06 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

Epoch 10/10

30/30 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - accuracy: 1.0000 - loss: 5.6766e-09 - val_accuracy: 1.0000 - val_loss: 0.0000e+00

11/11 ━━━━━━━━━━━━━━━━━━━━ 1s 112ms/step - accuracy: 1.0000 - loss: 0.0000e+00

Test Loss: 0.0

Test Accuracy: 1.0

1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 219ms/step

[The input fruit image is displayed here.]

Identified fruit: Banana

Neural Network Model

 

Expt No: 11                                         Neural Network Model

Date:

 

Aim: To write a program to demonstrate a simple Neural Network model.

 

Program

import numpy as np

import pandas as pd

from sklearn.model_selection import train_test_split

from sklearn.preprocessing import StandardScaler

import tensorflow as tf

from tensorflow.keras import layers, models, Input

import matplotlib.pyplot as plt

 

# Load the MNIST dataset

train_data = pd.read_csv('mnist_train.csv')

test_data = pd.read_csv('mnist_test.csv')

 

# Extract features and labels

X_train = train_data.drop('label', axis=1).values

y_train = train_data['label'].values

X_test = test_data.drop('label', axis=1).values

y_test = test_data['label'].values

 

# Standardize the features (zero mean, unit variance)

scaler = StandardScaler()

X_train = scaler.fit_transform(X_train)

X_test = scaler.transform(X_test)

 

# Convert labels to one-hot encoded vectors

y_train = tf.keras.utils.to_categorical(y_train)

y_test = tf.keras.utils.to_categorical(y_test)

 


 

# Define the neural network architecture

inputs = Input(shape=(784,))

x = layers.Dense(128, activation='relu')(inputs)

outputs = layers.Dense(10, activation='softmax')(x)

 

model = models.Model(inputs=inputs, outputs=outputs)

 

# Compile the model

model.compile(optimizer='adam',

              loss='categorical_crossentropy',

              metrics=['accuracy'])

 

# Train the model and capture history

history = model.fit(X_train, y_train, epochs=10, batch_size=128, validation_split=0.1)

 

# Plot training and validation loss

plt.plot(history.history['loss'], label='Training Loss')

plt.plot(history.history['val_loss'], label='Validation Loss')

plt.xlabel('Epoch')

plt.ylabel('Loss')

plt.title('Training and Validation Loss')

plt.legend()

plt.show()

 

# Plot training and validation accuracy

plt.plot(history.history['accuracy'], label='Training Accuracy')

plt.plot(history.history['val_accuracy'], label='Validation Accuracy')

plt.xlabel('Epoch')

plt.ylabel('Accuracy')

plt.title('Training and Validation Accuracy')

plt.legend()

plt.show()

 

# Evaluate the model on the test set

test_loss, test_accuracy = model.evaluate(X_test, y_test)

print(f'Test Accuracy: {test_accuracy:.4f}')

 

# Save the model

model.save('mnist_model.keras')
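Note: a short sketch of how the saved model could be reloaded for standalone use. It assumes the scaled test matrix X_test is still available; for inference on fresh data the fitted StandardScaler would also have to be persisted (e.g., with joblib):

# Reload the saved model and classify the first test image
reloaded = models.load_model('mnist_model.keras')

probs = reloaded.predict(X_test[:1])
print("Predicted digit:", np.argmax(probs, axis=1)[0])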

 

 

 

 

 

 

Result: Thus the program to demonstrate a simple Neural Network model was written and executed.

 

 

 

 


 

Sample Output

Epoch 1/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 26s 10ms/step - accuracy: 0.8402 - loss: 0.5427 - val_accuracy: 0.9618 - val_loss: 0.1429

 

Epoch 2/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.9613 - loss: 0.1285 - val_accuracy: 0.9690 - val_loss: 0.1144

 

Epoch 3/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.9759 - loss: 0.0826 - val_accuracy: 0.9710 - val_loss: 0.1069

 

Epoch 4/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9833 - loss: 0.0571 - val_accuracy: 0.9745 - val_loss: 0.1028

 

Epoch 5/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.9894 - loss: 0.0397 - val_accuracy: 0.9750 - val_loss: 0.0975

 

Epoch 6/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.9919 - loss: 0.0303 - val_accuracy: 0.9765 - val_loss: 0.1026

Epoch 7/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 3s 6ms/step - accuracy: 0.9945 - loss: 0.0233 - val_accuracy: 0.9762 - val_loss: 0.1061

 

Epoch 8/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.9961 - loss: 0.0176 - val_accuracy: 0.9757 - val_loss: 0.1038

 

Epoch 9/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.9969 - loss: 0.0136 - val_accuracy: 0.9758 - val_loss: 0.1071

 

Epoch 10/10

422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.9984 - loss: 0.0097 - val_accuracy: 0.9767 - val_loss: 0.1134

[Training and validation loss/accuracy plots are displayed here.]

313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9711 - loss: 0.1274

Test Accuracy: 0.9738

 


Expectation–Maximization (EM) algorithm

 

Expt No: 10                                           Expectation–Maximization (EM) algorithm

Date:

 

Aim: To write a program to demonstrate how missing values are handled using the SimpleImputer and Expectation–Maximization (EM) algorithms.

 

Program

# SimpleImputer method

 

import pandas as pd

from sklearn.model_selection import train_test_split

from sklearn.naive_bayes import GaussianNB

from sklearn.metrics import accuracy_score

from sklearn.impute import SimpleImputer

 

# Step 1: Load the dataset with missing values

iris_data = pd.read_csv("Iris.csv")

 

# Step 2: Introduce missing values in a specific feature (e.g., Petal length)

feature_with_missing_values = "PetalLength"

missing_percentage = 0.2

 

# Randomly select indices to introduce missing values

missing_indices = iris_data.sample(frac=missing_percentage, random_state=42).index

 

# Set the selected indices to NaN in the chosen feature

iris_data.loc[missing_indices, feature_with_missing_values] = None

 

# Step 3: Define features and target

X = iris_data.drop("Species", axis=1)

y = iris_data["Species"]

 

# Step 4: Split the dataset into train and test sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

 

# Step 5: Preprocess the data to handle missing values

imputer = SimpleImputer(strategy="mean")

X_train_imputed = imputer.fit_transform(X_train)

X_test_imputed = imputer.transform(X_test)

 

# Step 6: Train a Naive Bayes classifier

classifier = GaussianNB()

classifier.fit(X_train_imputed, y_train)

 

# Step 7: Make predictions

y_pred = classifier.predict(X_test_imputed)

 

# Step 8: Calculate accuracy

accuracy = accuracy_score(y_test, y_pred)

print("Accuracy after imputing missing values with SimpleImputer: {:.4f}".format(accuracy))

 

 

# EM Algorithm

import pandas as pd

from sklearn.model_selection import train_test_split

from sklearn.naive_bayes import GaussianNB

from sklearn.metrics import accuracy_score

from sklearn.mixture import GaussianMixture

import os

import warnings

 

os.environ["OMP_NUM_THREADS"] = "1"

warnings.filterwarnings("ignore", category=UserWarning)

 

# Step 1: Load the dataset with missing values

iris_data = pd.read_csv("Iris.csv")

 

# Step 2: Introduce missing values in a specific feature (e.g., PetalLength)

feature_with_missing_values = "PetalLength"

missing_percentage = 0.2

 

# Randomly select indices to introduce missing values

missing_indices = iris_data.sample(frac=missing_percentage, random_state=42).index

 

# Set the selected indices to NaN in the chosen feature

iris_data.loc[missing_indices, feature_with_missing_values] = None

 

# Step 3: Define features and target

X = iris_data.drop("Species", axis=1)

y = iris_data["Species"]

 

# Step 4: Split the dataset into train and test sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

 

# Step 5: Initialize GMM

gmm = GaussianMixture(n_components=3, random_state=42)

 

# Step 6: Impute missing values using EM algorithm

X_train_imputed = X_train.copy()

X_test_imputed = X_test.copy()

 

for feature in X_train_imputed.columns:

    missing_train_indices = X_train_imputed[X_train_imputed[feature].isnull()].index

    missing_test_indices = X_test_imputed[X_test_imputed[feature].isnull()].index

    # Fit the GMM on the non-missing values of this feature

    gmm.fit(X_train_imputed.loc[~X_train_imputed.index.isin(missing_train_indices), [feature]])

    # Impute missing values by sampling from the fitted mixture
    # (only sample when there is something to fill; gmm.sample requires n_samples >= 1)

    if len(missing_train_indices) > 0:

        X_train_imputed.loc[missing_train_indices, [feature]] = gmm.sample(n_samples=len(missing_train_indices))[0]

    if len(missing_test_indices) > 0:

        X_test_imputed.loc[missing_test_indices, [feature]] = gmm.sample(n_samples=len(missing_test_indices))[0]

 

# Step 7: Train a Naive Bayes classifier

classifier = GaussianNB()

classifier.fit(X_train_imputed, y_train)

 

# Step 8: Make predictions

y_pred = classifier.predict(X_test_imputed)

 

# Step 9: Calculate accuracy

accuracy = accuracy_score(y_test, y_pred)

print("Accuracy after imputing missing values with EM algorithm: {:.4f}".format(accuracy))

 

 

Result: Thus the program to demonstrate handling of missing values using SimpleImputer and the Expectation–Maximization (EM) algorithm was written and executed.





Sample Output

 

# SimpleImputer method

Accuracy after imputing missing values with SimpleImputer: 0.6333

 

# EM Algorithm

Accuracy after imputing missing values with EM algorithm: 0.7667

 

 


Clustering Algorithms

 

Expt No: 9                                           Clustering Algorithms

Date:

 

Aim: To write a program to demonstrate clustering algorithms.

 

Program

import pandas as pd

from sklearn.cluster import KMeans

from sklearn.metrics import silhouette_score

from sklearn.mixture import GaussianMixture

import matplotlib.pyplot as plt

 

# Load the dataset and drop unnecessary fields

data = pd.read_csv('Mall_Customers.csv')

data.drop(['CustomerID', 'Genre', 'Spending Score'], axis=1, inplace=True)  # column names must match the CSV header (some versions name the last column 'Spending Score (1-100)')

 

# Use Silhouette Score method to find the optimal number of clusters

silhouette_scores = []

for n_clusters in range(2, 11):  # Trying cluster numbers from 2 to 10

    kmeans = KMeans(n_clusters=n_clusters, random_state=42)

    cluster_labels = kmeans.fit_predict(data)

    silhouette_avg = silhouette_score(data, cluster_labels)

    silhouette_scores.append(silhouette_avg)

 

# Display the optimal number of clusters

optimal_clusters = silhouette_scores.index(max(silhouette_scores)) + 2  # +2 because range starts from 2

print("Optimal number of clusters:", optimal_clusters)

 

# Plot Silhouette Scores

plt.plot(range(2, 11), silhouette_scores, marker='o')

plt.xlabel('Number of Clusters')

plt.ylabel('Silhouette Score')

plt.title('Silhouette Score vs Number of Clusters')

plt.show()

 


 

# Display KMeans clusters

optimal_kmeans = KMeans(n_clusters=optimal_clusters, random_state=42)

optimal_cluster_labels = optimal_kmeans.fit_predict(data)

 

# Plotting the clusters along with centroids

plt.scatter(data.iloc[:, 0], data.iloc[:, 1], c=optimal_cluster_labels, cmap='viridis')

plt.scatter(optimal_kmeans.cluster_centers_[:, 0], optimal_kmeans.cluster_centers_[:, 1], s=150, c='red', marker='o')

plt.xlabel('Age')

plt.ylabel('Annual Income')

plt.title('KMeans Clustering with {} clusters'.format(optimal_clusters))

plt.show()

 

 

# Fit Gaussian Mixture Model

gmm = GaussianMixture(n_components=optimal_clusters, random_state=42)

gmm.fit(data)

gmm_cluster_labels = gmm.predict(data)

 

# Display GMM clusters

plt.scatter(data.iloc[:, 0], data.iloc[:, 1], c=gmm_cluster_labels, cmap='viridis')

plt.scatter(gmm.means_[:, 0], gmm.means_[:, 1], s=150, c='red', marker='o')

plt.xlabel('Age')

plt.ylabel('Annual Income')

plt.title('Gaussian Mixture Model Clustering with {} clusters'.format(optimal_clusters))

plt.show()
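Note: the silhouette score can also be computed for the GMM labels, giving a like-for-like comparison with KMeans on the same data. A minimal sketch:

# Compare cluster quality of the two models at the chosen cluster count
print("KMeans silhouette score: {:.4f}".format(max(silhouette_scores)))
print("GMM silhouette score: {:.4f}".format(silhouette_score(data, gmm_cluster_labels)))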

 

 

 

Result: Thus the program to demonstrate clustering algorithms was written and executed.