
Project Report

Machine Learning and Applications

Topic:
Handwritten Digit Classification Using
Multiclass Neural Network

Thrisha S        PES1UG20EC213
Adarsh Hegde     PES1UG20EC246
Akarsh Hegde     PES1UG20EC248

Abstract:
This report summarizes a machine learning project on handwritten digit
classification using a multiclass neural network. A sequential model with
three dense layers is trained on handwritten digit data to classify the
digits 0-9.

Code:

import numpy as np
import tensorflow as tf
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib import pyplot
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.activations import linear, relu, sigmoid

def my_softmax(z):
    """ Softmax converts a vector of values to a probability distribution.
    Args:
      z (ndarray (N,)) : input data, N features
    Returns:
      a (ndarray (N,)) : softmax of z
    """
    ### START CODE HERE ###
    ez = np.exp(z)        # element-wise exponential
    a = ez / np.sum(ez)
    ### END CODE HERE ###
    return a

np.set_printoptions(precision=2)
z = np.array([1., 2., 3., 4.])
a = my_softmax(z)
atf = tf.nn.softmax(z)
print(f"my_softmax(z): {a}")
print(f"tensorflow softmax(z): {atf}")

my_softmax(z): [0.03 0.09 0.24 0.64]
tensorflow softmax(z): [0.03 0.09 0.24 0.64]
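The naive implementation above can overflow when the logits are large, since np.exp grows quickly. A standard numerically stable variant (an illustrative addition, not part of the original report) subtracts the maximum logit first, which leaves the result unchanged:

def stable_softmax(z):
    """Softmax with the max-subtraction trick; avoids overflow for large z."""
    ez = np.exp(z - np.max(z))   # shifting by max(z) keeps every exponent <= 0
    return ez / np.sum(ez)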

data = loadmat('ex3data1.mat')
X, y = data['X'], data['y'].ravel()
y[y == 10] = 0   # the .mat file labels the digit 0 as 10 (MATLAB 1-indexing)
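A quick sanity check (an illustrative addition, not in the original report) confirms the remapped labels span exactly 0-9:

assert set(np.unique(y)) == set(range(10))   # labels are now 0..9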

print('The first element of X is: ', X[0])

The first element of X is:  [0.00e+00 0.00e+00 0.00e+00 ... 0.00e+00]
(output abridged: each row of X holds the 400 pixel values of one flattened
20x20 grayscale image, mostly zero at the borders)

print('The shape of X is: ' + str(X.shape))
print('The shape of y is: ' + str(y.shape))

The shape of X is: (5000, 400)
The shape of y is: (5000,)

print('The first element of y is: ', y[0])
print('The last element of y is: ', y[-1])

The first element of y is: 0
The last element of y is: 9

tf.random.set_seed(1234)  # for consistent results

model = Sequential(
    [
        ### START CODE HERE ###
        tf.keras.Input(shape=(400,)),
        Dense(25, activation='relu',   name='L1'),
        Dense(15, activation='relu',   name='L2'),
        Dense(10, activation='linear', name='L3'),
        ### END CODE HERE ###
    ], name="my_model"
)

model.summary()

Model: "my_model"
_________________________________________________________________

Layer (type) Output Shape Param #


=================================================================

L1 (Dense)
L2 (Dense)
L3 (Dense)

(None, 25) 10025


(None, 15) 390
(None, 10) 160

=================================================================
Total params: 10,575
Trainable params: 10,575
Non-trainable params: 0
_________________________________________________________________

[layer1, layer2, layer3] = model.layers

W1,b1 = layer1.get_weights()
W2,b2 = layer2.get_weights()
W3,b3 = layer3.get_weights()
print(f"W1 shape = {W1.shape}, b1 shape = {b1.shape}")
print(f"W2 shape = {W2.shape}, b2 shape = {b2.shape}")
print(f"W3 shape = {W3.shape}, b3 shape = {b3.shape}")

W1 shape = (400, 25), b1 shape = (25,)
W2 shape = (25, 15), b2 shape = (15,)
W3 shape = (15, 10), b3 shape = (10,)
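These shapes also explain the parameter counts in the summary above: each Dense layer has (inputs x units) weights plus units biases. A quick check (an illustrative addition, not in the original report):

# L1: 400*25 + 25 = 10025,  L2: 25*15 + 15 = 390,  L3: 15*10 + 10 = 160
assert W1.size + b1.size == 10025
assert W2.size + b2.size == 390
assert W3.size + b3.size == 160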

model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
)

history = model.fit(X, y, epochs=50)
(training log abridged; earlier epochs omitted)
Epoch 14/50
157/157 [==============================] - 1s 5ms/step - loss: 0.1606
Epoch 15/50
157/157 [==============================] - 1s 4ms/step - loss: 0.1482
Epoch 16/50
157/157 [==============================] - 1s 5ms/step - loss: 0.1437
Epoch 17/50
157/157 [==============================] - 1s 4ms/step - loss: 0.1341
Epoch 18/50
157/157 [==============================] - 1s 4ms/step - loss: 0.1281
Epoch 19/50
157/157 [==============================] - 1s 5ms/step - loss: 0.1193
Epoch 20/50
157/157 [==============================] - 1s 5ms/step - loss: 0.1149
Epoch 21/50
157/157 [==============================] - 1s 5ms/step - loss: 0.1102
Epoch 22/50
157/157 [==============================] - 1s 5ms/step - loss: 0.1062
Epoch 23/50
157/157 [==============================] - 1s 6ms/step - loss: 0.0994
Epoch 24/50
157/157 [==============================] - 1s 5ms/step - loss: 0.0916
Epoch 25/50
157/157 [==============================] - 1s 5ms/step - loss: 0.0903
Epoch 26/50
157/157 [==============================] - 1s 8ms/step - loss: 0.0854
Epoch 27/50
157/157 [==============================] - 1s 9ms/step - loss: 0.0809
Epoch 28/50
157/157 [==============================] - 1s 8ms/step - loss: 0.0755
Epoch 29/50
157/157 [==============================] - 1s 9ms/step - loss: 0.0705
Epoch 30/50
(remaining epochs omitted)
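A note on the setup above: the output layer is linear and SparseCategoricalCrossentropy is constructed with from_logits=True, so the softmax is folded into the loss for numerical stability. Probabilities must therefore be recovered explicitly at inference time, e.g. (illustrative sketch, assuming the trained model from above):

logits = model.predict(X[:1])    # raw scores, not probabilities
probs = tf.nn.softmax(logits)    # convert logits to a probability distribution
print(np.argmax(probs))          # predicted digit for the first example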

def displayData(X, example_width=None, figsize=(3, 3)):
    """
    Displays 2D data stored in X in a nice grid.
    """
    # Compute rows, cols
    if X.ndim == 2:
        m, n = X.shape
    elif X.ndim == 1:
        n = X.size
        m = 1
        X = X[None]  # Promote to a 2-dimensional array
    else:
        raise IndexError('Input X should be 1 or 2 dimensional.')

    example_width = example_width or int(np.round(np.sqrt(n)))
    example_height = n // example_width

    # Compute number of items to display
    display_rows = int(np.floor(np.sqrt(m)))
    display_cols = int(np.ceil(m / display_rows))

    fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
    fig.subplots_adjust(wspace=0.025, hspace=0.025)

    ax_array = [ax_array] if m == 1 else ax_array.ravel()

    for i, ax in enumerate(ax_array):
        ax.imshow(X[i].reshape(example_width, example_width, order='F'),
                  cmap='Greys', extent=[0, 1, 0, 1])
        ax.axis('off')
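Both display paths in this report undo the same MATLAB-style flattening: reshape(..., order='F') here and reshape(...).T in the loop further below are equivalent for square images. A quick check (an illustrative addition, not in the original report):

img = X[0]
assert np.array_equal(img.reshape(20, 20, order='F'), img.reshape(20, 20).T)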

image_of_two = X[99]   # note: with this dataset's ordering, X[99] is actually a zero
displayData(image_of_two)

prediction = model.predict(image_of_two.reshape(1, 400))  # raw logits

print(f" predicting: \n{prediction}")
print(f" Largest Prediction index: {np.argmax(prediction)}")

1/1 [==============================] - 0s 119ms/step
 predicting:
[[ 15.85 -15.92  -3.93  -0.39 -18.84  -8.25   2.24   2.72  -2.19   6.21]]
 Largest Prediction index: 0
m, n = X.shape

fig, axes = pyplot.subplots(8, 8, figsize=(5, 5))
fig.tight_layout(pad=0.13, rect=[0, 0.03, 1, 0.91])  # [left, bottom, right, top]

for i, ax in enumerate(axes.flat):
    # Select a random index
    random_index = np.random.randint(m)

    # Select the corresponding row and reshape it into an image
    X_random_reshaped = X[random_index].reshape((20, 20)).T

    # Display the image
    ax.imshow(X_random_reshaped, cmap='gray')

    # Predict using the Neural Network
    prediction = model.predict(X[random_index].reshape(1, 400))
    prediction_p = tf.nn.softmax(prediction)
    yhat = np.argmax(prediction_p)

    # Display the label above the image
    ax.set_title(f"{y[random_index]},{yhat}", fontsize=10)
    ax.set_axis_off()

fig.suptitle("Label, yhat", fontsize=14)
pyplot.show()

1/1 [==============================] - 0s 31ms/step
1/1 [==============================] - 0s 28ms/step
...
(one progress line per predicted image; remainder omitted)

def display_errors(model, X, y):
    f = model.predict(X)
    yhat = np.argmax(f, axis=1)
    idxs = np.where(yhat != y)[0]
    if len(idxs) == 0:
        print("no errors found")
    else:
        cnt = min(8, len(idxs))
        fig, ax = plt.subplots(1, cnt, figsize=(5, 1.2))
        fig.tight_layout(pad=0.18, rect=[0, 0.03, 1, 0.80])  # [left, bottom, right, top]

        for i in range(cnt):
            j = idxs[i]
            X_reshaped = X[j].reshape((20, 20)).T

            # Display the image
            ax[i].imshow(X_reshaped, cmap='gray')

            # Predict using the Neural Network
            prediction = model.predict(X[j].reshape(1, 400))
            prediction_p = tf.nn.softmax(prediction)
            yhat = np.argmax(prediction_p)

            # Display the label above the image
            ax[i].set_title(f"{y[j]},{yhat}", fontsize=10)
            ax[i].set_axis_off()
        fig.suptitle("Label, yhat", fontsize=12)

    return len(idxs)

error = display_errors(model, X, y)
print(f"{error} errors out of {len(X)} images")
print(f"Accuracy: {((len(X) - error) * 100) / len(X)}%")

157/157 [==============================] - 0s 2ms/step
5 errors out of 5000 images
Accuracy: 99.9%
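Worth noting: the 99.9% figure is accuracy on the training set itself, since the model is evaluated on the same 5000 images it was fit on. A held-out split would give a less optimistic estimate; a minimal sketch, assuming scikit-learn is available (not part of the original report):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
# in practice the model should be re-initialized before retraining on X_train
model.fit(X_train, y_train, epochs=50)
test_preds = np.argmax(model.predict(X_test), axis=1)
print(f"Test accuracy: {100 * np.mean(test_preds == y_test):.1f}%")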
