
FAIRFIELD INSTITUTE OF MANAGEMENT AND TECHNOLOGY

PRACTICAL FILE

Subject Name: DEEP LEARNING WITH PYTHON
Subject Code: BCAP 314

Submitted to:                         Submitted by:
MS. TARUNNA RANA                      RAKSHIT
ASSISTANT PROFESSOR                   BCA 6th Sem.
IT DEPARTMENT                         04490102021
TABLE OF CONTENTS

1. Write a program for creating a perceptron.
2. Write a program to implement a multi-layer perceptron (MLP) using TensorFlow. Apply the MLP to the Iris dataset.
3. (a) Write a program to implement a Convolutional Neural Network (CNN) in Keras. Perform predictions using the trained CNN.
   (b) Write a program to build an image classifier with the CIFAR-10 data.
4. (a) Write a program to perform face detection using a CNN.
   (b) Write a program to demonstrate hyperparameter tuning in a CNN.
   (c) Predicting bike-sharing patterns: build and train neural networks from scratch to predict the number of bike-share users on a given day.
5. Write a program to build an autoencoder in Keras.
6. Write a program to implement a basic reinforcement learning algorithm to teach a bot to reach its destination.
7. (a) Write a program to implement a Recurrent Neural Network (RNN).
   (b) Write a program to implement an LSTM and perform time-series analysis using it.
8. (a) Write a program to perform object detection using deep learning.
   (b) Dog-breed classifier: design and train a convolutional neural network to analyze images of dogs and correctly identify their breeds. Use transfer learning and well-known architectures to improve the model.
9. (a) Write a program to demonstrate different activation functions.
   (b) Write a program in TensorFlow to demonstrate different loss functions.
10. Write a program to build an Artificial Neural Network by implementing the backpropagation algorithm, and test it on an appropriate dataset.
Practical 1: Write a program for creating a perceptron.
CODE:
import numpy as np

class Perceptron:
    def __init__(self, input_size, learning_rate=0.01, epochs=100):
        self.input_size = input_size
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = np.zeros(input_size + 1)  # weights[0] is the bias

    def predict(self, inputs):
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        return 1 if summation >= 0 else 0

    def train(self, X, y):
        for _ in range(self.epochs):
            for i in range(X.shape[0]):
                prediction = self.predict(X[i])
                error = y[i] - prediction
                self.weights[1:] += self.learning_rate * error * X[i]
                self.weights[0] += self.learning_rate * error

if __name__ == "__main__":
    # Training data: the truth table of the logical AND function
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y_train = np.array([0, 0, 0, 1])

    perceptron = Perceptron(input_size=2)
    perceptron.train(X_train, y_train)

    test_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    for data in test_data:
        prediction = perceptron.predict(data)
        print(f"Input: {data}, Predicted Output: {prediction}")

OUTPUT:

Input: [0 0], Predicted Output: 0


Input: [0 1], Predicted Output: 0
Input: [1 0], Predicted Output: 0
Input: [1 1], Predicted Output: 1
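
The training loop above implements the classic perceptron learning rule: for learning rate $\eta$, target $y$, and prediction $\hat{y}$, each sample updates

$$w \leftarrow w + \eta\,(y - \hat{y})\,x, \qquad b \leftarrow b + \eta\,(y - \hat{y}).$$

A single perceptron can only represent linearly separable functions, so it learns AND (as here) but cannot learn XOR; Practical 10 solves XOR by adding a hidden layer.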

Practical 2: Write a program to implement a multi-layer perceptron (MLP) using TensorFlow. Apply the MLP to the Iris dataset.
CODE:
import numpy as np
# This practical uses the TensorFlow 1.x graph API (placeholders, sessions);
# on TensorFlow 2.x, run it through the compatibility layer as below.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder

iris = load_iris()
X = iris.data
y = iris.target.reshape(-1, 1)

# On scikit-learn < 1.2 this argument is spelled sparse=False
onehot_encoder = OneHotEncoder(sparse_output=False)
y = onehot_encoder.fit_transform(y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

learning_rate = 0.001
training_epochs = 1000
display_step = 50

n_input = X_train.shape[1]
n_hidden_1 = 10
n_hidden_2 = 10
n_classes = y_train.shape[1]

X_placeholder = tf.placeholder(tf.float32, [None, n_input])
y_placeholder = tf.placeholder(tf.float32, [None, n_classes])

weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def multilayer_perceptron(x):
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    output_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return output_layer

logits = multilayer_perceptron(X_placeholder)

loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_placeholder))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_placeholder, 1))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for epoch in range(1, training_epochs + 1):
        sess.run(train_op, feed_dict={X_placeholder: X_train, y_placeholder: y_train})
        if epoch % display_step == 0:
            loss, acc = sess.run([loss_op, accuracy_op],
                                 feed_dict={X_placeholder: X_train, y_placeholder: y_train})
            print("Epoch " + str(epoch) + ", Loss= " + "{:.4f}".format(loss) +
                  ", Training Accuracy= " + "{:.3f}".format(acc))

    print("Optimization Finished!")

    test_acc = sess.run(accuracy_op,
                        feed_dict={X_placeholder: X_test, y_placeholder: y_test})
    print("Testing Accuracy:", test_acc)

OUTPUT:
Epoch 50, Loss= 1.5843, Training Accuracy= 0.350
Epoch 100, Loss= 1.2712, Training Accuracy= 0.350
Epoch 150, Loss= 1.0546, Training Accuracy= 0.350
Epoch 200, Loss= 0.8949, Training Accuracy= 0.367
Epoch 250, Loss= 0.7794, Training Accuracy= 0.600
Epoch 300, Loss= 0.6998, Training Accuracy= 0.675
Epoch 350, Loss= 0.6430, Training Accuracy= 0.708
Epoch 400, Loss= 0.5990, Training Accuracy= 0.750
Epoch 450, Loss= 0.5617, Training Accuracy= 0.792
Epoch 500, Loss= 0.5288, Training Accuracy= 0.808
Epoch 550, Loss= 0.4987, Training Accuracy= 0.833
Epoch 600, Loss= 0.4704, Training Accuracy= 0.850
Epoch 650, Loss= 0.4433, Training Accuracy= 0.867
Epoch 700, Loss= 0.4167, Training Accuracy= 0.875
Epoch 750, Loss= 0.3899, Training Accuracy= 0.875
Epoch 800, Loss= 0.3638, Training Accuracy= 0.883
Epoch 850, Loss= 0.3386, Training Accuracy= 0.883
Epoch 900, Loss= 0.3146, Training Accuracy= 0.892
Epoch 950, Loss= 0.2924, Training Accuracy= 0.900
Epoch 1000, Loss= 0.2719, Training Accuracy= 0.900
Optimization Finished!
Testing Accuracy: 0.93333334
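
For reference, the same network is far more compact in the TF 2.x Keras API. This is a sketch, not part of the original practical; the layer sizes and optimizer match the graph version above, it reuses X_train, y_train, X_test, y_test, and it should be run in a fresh session (the code above disables TF 2.x behavior):

import tensorflow as tf

# 4 input features -> 10 -> 10 -> 3 classes, as in the graph-mode MLP
keras_mlp = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])
keras_mlp.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                  loss='categorical_crossentropy', metrics=['accuracy'])
keras_mlp.fit(X_train, y_train, epochs=1000, verbose=0)
print("Testing Accuracy:", keras_mlp.evaluate(X_test, y_test, verbose=0)[1])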

Practical 3. (a) Write a program to implement a Convolutional Neural Network (CNN) in Keras. Perform predictions using the trained CNN.

CODE:
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.datasets import mnist
from keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(X_train, y_train, epochs=5, batch_size=128, validation_data=(X_test, y_test))

loss, accuracy = model.evaluate(X_test, y_test)

print("Test Loss:", loss)

print("Test Accuracy:", accuracy)

predictions = model.predict(X_test[:10])
predicted_classes = np.argmax(predictions, axis=1)
print("Predicted classes:", predicted_classes)

OUTPUT:

Epoch 1/5
60000/60000 [==============================] - 44s 738us/step - loss: 0.2464 - accuracy: 0.9271 - val_loss: 0.0839 - val_accuracy: 0.9737
Epoch 2/5
60000/60000 [==============================] - 42s 701us/step - loss: 0.0701 - accuracy: 0.9788 - val_loss: 0.0469 - val_accuracy: 0.9841
Epoch 3/5
60000/60000 [==============================] - 42s 701us/step - loss: 0.0490 - accuracy: 0.9852 - val_loss: 0.0382 - val_accuracy: 0.9873
Epoch 4/5
60000/60000 [==============================] - 42s 700us/step - loss: 0.0376 - accuracy: 0.9883 - val_loss: 0.0320 - val_accuracy: 0.9895
Epoch 5/5
60000/60000 [==============================] - 42s 700us/step - loss: 0.0310 - accuracy: 0.9903 - val_loss: 0.0334 - val_accuracy: 0.9887
10000/10000 [==============================] - 3s 306us/step
Test Loss: 0.03337173608099645
Test Accuracy: 0.988700032711029
Predicted classes: [7 2 1 0 4 1 4 9 6 9]
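
The ten predicted digits match the true labels of the first ten MNIST test images (7, 2, 1, 0, 4, 1, 4, 9, 6, 9), consistent with the roughly 98.9% test accuracy.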

(b) Write a program to build an Image Classifier with CIFAR-10 Data.

CODE:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=64, validation_split=0.1)
test_loss, test_acc = model.evaluate(x_test, y_test)

print('Test accuracy:', test_acc)

OUTPUT:
Epoch 1/10
704/704 [==============================] - 51s 71ms/step - loss: 1.5155 - accuracy: 0.4481 - val_loss: 1.2701 - val_accuracy: 0.5476
Epoch 2/10
704/704 [==============================] - 50s 70ms/step - loss: 1.1580 - accuracy: 0.5912 - val_loss: 1.0725 - val_accuracy: 0.6214
Epoch 3/10
704/704 [==============================] - 50s 70ms/step - loss: 1.0043 - accuracy: 0.6476 - val_loss: 0.9917 - val_accuracy: 0.6570
Epoch 4/10
704/704 [==============================] - 49s 70ms/step - loss: 0.9035 - accuracy: 0.6827 - val_loss: 0.9283 - val_accuracy: 0.6818
Epoch 5/10
704/704 [==============================] - 50s 71ms/step - loss: 0.8346 - accuracy: 0.7056 - val_loss: 0.9001 - val_accuracy: 0.6888
Epoch 6/10
704/704 [==============================] - 50s 71ms/step - loss: 0.7775 - accuracy: 0.7273 - val_loss: 0.9134 - val_accuracy: 0.6930
Epoch 7/10
704/704 [==============================] - 50s 71ms/step - loss: 0.7275 - accuracy: 0.7440 - val_loss: 0.8881 - val_accuracy: 0.7000
Epoch 8/10
704/704 [==============================] - 49s 70ms/step - loss: 0.6845 - accuracy: 0.7593 - val_loss: 0.8662 - val_accuracy: 0.7124
Epoch 9/10
704/704 [==============================] - 50s 71ms/step - loss: 0.6413 - accuracy: 0.7755 - val_loss: 0.8911 - val_accuracy: 0.7080
Epoch 10/10
704/704 [==============================] - 50s 71ms/step - loss: 0.6027 - accuracy: 0.7880 - val_loss: 0.9071 - val_accuracy: 0.7078
313/313 [==============================] - 3s 10ms/step - loss: 0.9215 - accuracy: 0.7031
Test accuracy: 0.7031000256538391
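
To inspect individual predictions, the trained classifier can be queried directly. A short sketch; the class names below are the standard CIFAR-10 labels:

import numpy as np

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

probs = model.predict(x_test[:5])
for i, p in enumerate(probs):
    print(f"Image {i}: predicted '{class_names[np.argmax(p)]}'")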

Practical 4. (a) Write a program to perform face detection using a CNN.
CODE:
from keras.preprocessing.image import ImageDataGenerator

TrainingImagePath = '/path/to/training/images'

train_datagen = ImageDataGenerator(
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True
)

test_datagen = ImageDataGenerator()

training_set = train_datagen.flow_from_directory(
TrainingImagePath,
target_size=(64, 64),
batch_size=32,
class_mode='categorical'
)

test_set = test_datagen.flow_from_directory(
TrainingImagePath,
target_size=(64, 64),
batch_size=32,
class_mode='categorical'
)

print(test_set.class_indices)

OUTPUT: (depends on the image directory: flow_from_directory prints "Found N images belonging to K classes." for each split, and the last line prints the class-to-index dictionary)
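
The code above only builds the data pipeline. A minimal CNN that could then be trained on the loaded face images might look like the following sketch (the class count is read from the generator; all other settings are illustrative assumptions):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

num_face_classes = len(training_set.class_indices)

classifier = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(num_face_classes, activation='softmax')
])
classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
classifier.fit(training_set, epochs=10, validation_data=test_set)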

b) Write a program to demonstrate hyperparameter tuning in CNN.
CODE:
import numpy as np
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
# Note: this wrapper was removed in TensorFlow 2.12+;
# there, use KerasClassifier from the scikeras package instead.
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = np.expand_dims(x_train.astype('float32') / 255.0, axis=-1)
x_test = np.expand_dims(x_test.astype('float32') / 255.0, axis=-1)

def create_model(kernel_size=(3, 3), pool_size=(2, 2), activation='relu', optimizer='adam'):
    model = Sequential([
        Conv2D(32, kernel_size, activation=activation, input_shape=(28, 28, 1)),
        MaxPooling2D(pool_size),
        Conv2D(64, kernel_size, activation=activation),
        MaxPooling2D(pool_size),
        Flatten(),
        Dense(128, activation=activation),
        Dense(10, activation='softmax')
    ])
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

model = KerasClassifier(build_fn=create_model, epochs=5, batch_size=64, verbose=0)

# 16 hyperparameter combinations, each cross-validated 3-fold: 48 training runs
param_grid = {
    'kernel_size': [(3, 3), (5, 5)],
    'pool_size': [(2, 2), (3, 3)],
    'activation': ['relu', 'tanh'],
    'optimizer': ['adam', 'sgd']
}

grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=3)
grid_result = grid.fit(x_train, y_train)

print("Best Accuracy:", grid_result.best_score_)
print("Best Parameters:", grid_result.best_params_)

OUTPUT:
Best Accuracy: 0.9832666708628349
Best Parameters: {'activation': 'relu', 'kernel_size': (3, 3), 'optimizer': 'adam', 'pool_size': (2, 2)}

(c)Predicting Bike-Sharing Patterns – Build and train neural networks from scratch to predict the number of
bike share users on a given day.
CODE:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense

# Assumes a CSV whose columns are all numeric features plus the 'cnt' target;
# non-numeric columns (e.g. dates) would need to be dropped or encoded first.
bike_data = pd.read_csv('bike_sharing_data.csv')

X = bike_data.drop(columns=['cnt'])
y = bike_data['cnt']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(X_train.shape[1],)))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='linear'))

model.compile(optimizer='adam', loss='mean_squared_error')

model.fit(X_train_scaled, y_train, epochs=50, batch_size=32, validation_split=0.2)

test_loss = model.evaluate(X_test_scaled, y_test)
print("Test Loss:", test_loss)

predictions = model.predict(X_test_scaled)

OUTPUT:
Epoch 1/50
584/584 [==============================] - 1s 1ms/step - loss: 19278.2168 - val_loss: 10131.5879
Epoch 2/50
584/584 [==============================] - 1s 1ms/step - loss: 8279.4697 - val_loss: 6395.8965
...
Epoch 50/50
584/584 [==============================] - 1s 1ms/step - loss: 3031.0266 - val_loss: 3017.9963
152/152 [==============================] - 0s 927us/step - loss: 3044.9084
Test Loss: 3044.908447265625
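
Since the loss is mean squared error, a test loss of about 3045 corresponds to a root-mean-squared error of roughly √3045 ≈ 55 riders per day.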

Practical 5. Write a program to build an autoencoder in Keras.
CODE:
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
import numpy as np

(x_train, _), (x_test, _) = mnist.load_data()

x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

encoding_dim = 32

input_img = Input(shape=(784,))

encoded = Dense(encoding_dim, activation='relu')(input_img)

decoded = Dense(784, activation='sigmoid')(encoded)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))

# The autoencoder's output is the reconstruction; a separate encoder model
# exposes the 32-dimensional codes.
encoder = Model(input_img, encoded)
encoded_imgs = encoder.predict(x_test)
decoded_imgs = autoencoder.predict(x_test)

OUTPUT:
Epoch 1/50
235/235 [==============================] - 2s 6ms/step - loss: 0.2814 - val_loss: 0.1831
Epoch 2/50
235/235 [==============================] - 1s 5ms/step - loss: 0.1664 - val_loss: 0.1479
Epoch 3/50
235/235 [==============================] - 1s 5ms/step - loss: 0.1411 - val_loss: 0.1325
...
Epoch 48/50
235/235 [==============================] - 1s 5ms/step - loss: 0.0997 - val_loss: 0.0992
Epoch 49/50
235/235 [==============================] - 1s 5ms/step - loss: 0.0997 - val_loss: 0.0992
Epoch 50/50
235/235 [==============================] - 1s 5ms/step - loss: 0.0997 - val_loss: 0.0991
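
To judge reconstruction quality visually, originals and reconstructions can be plotted side by side. A minimal sketch using the decoded_imgs computed above:

import matplotlib.pyplot as plt

n = 10  # number of digits to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # top row: original digit
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    ax.axis('off')
    # bottom row: its reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    ax.axis('off')
plt.show()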

Practical 6. Write a program to implement a basic reinforcement learning algorithm to teach a bot to reach its destination.
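
The bot below uses tabular Q-learning: after each move it applies the update

$$Q(s, a) \leftarrow Q(s, a) + \alpha \left[ r + \gamma \max_{a'} Q(s', a') - Q(s, a) \right],$$

where $\alpha$ is the learning rate, $\gamma$ the discount factor, $r$ the reward, and $s'$ the state reached.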
CODE:
import numpy as np

# 0: empty cell, 1: obstacle, 2: destination
environment = np.array([
    [0, 0, 0, 0, 0],
    [0, 1, 1, 0, 0],
    [0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0],
    [0, 0, 0, 0, 2]
])

Q = np.zeros((5, 5, 4))  # one Q-value per cell and action

alpha = 0.1    # learning rate
gamma = 0.9    # discount factor
epsilon = 0.1  # exploration rate

actions = [(0, -1), (0, 1), (-1, 0), (1, 0)]  # left, right, up, down

for _ in range(1000):
    state = (0, 0)
    while True:
        # Epsilon-greedy action selection
        if np.random.rand() < epsilon:
            action = np.random.choice(4)
        else:
            action = np.argmax(Q[state[0], state[1], :])

        next_state = (state[0] + actions[action][0], state[1] + actions[action][1])
        # Stay in place if the move would leave the 5x5 grid
        if not (0 <= next_state[0] < 5 and 0 <= next_state[1] < 5):
            next_state = state

        # Obstacles are penalized; a positive goal reward is added here
        # (the original gave 0 everywhere except obstacles) so that the
        # learned Q-values actually point toward the destination.
        if environment[next_state] == 2:
            reward = 100
        elif environment[next_state] == 1:
            reward = -1
        else:
            reward = 0

        Q[state[0], state[1], action] += alpha * (
            reward + gamma * np.max(Q[next_state[0], next_state[1], :])
            - Q[state[0], state[1], action])

        state = next_state
        if environment[state] == 2:
            break

# Greedy rollout of the learned policy
state = (0, 0)
path = [(0, 0)]
while True:
    action = np.argmax(Q[state[0], state[1], :])
    state = (state[0] + actions[action][0], state[1] + actions[action][1])
    path.append(state)
    if environment[state] == 2:
        break

print("Optimal path:")
for p in path:
    print(p)
OUTPUT:
Optimal path:
(0, 0)
(1, 0)
(2, 0)
(2, 1)
(2, 2)
(3, 2)
(4, 2)
(4, 3)
(4, 4)

Practical 7. (a) Write a program to implement a Recurrent Neural Network
CODE:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
import numpy as np

data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32

x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))

model = Sequential()
model.add(SimpleRNN(32, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5, batch_size=batch_size)

OUTPUT:
Epoch 1/5
320/320 [==============================] - 2s 6ms/step - loss: 11.8621 - accuracy: 0.0975
Epoch 2/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7602 - accuracy: 0.0972
Epoch 3/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7543 - accuracy: 0.0986
Epoch 4/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7512 - accuracy: 0.0981
Epoch 5/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7487 - accuracy: 0.0980
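
Because both the inputs and the targets are random, there is no pattern to learn: accuracy stays at chance level (about 1 in 10 classes), exactly as the output shows.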

(b) Write a program to implement an LSTM and perform time-series analysis using it.
CODE:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
import numpy as np

# Synthetic data stands in for a real time series here
time_series_data = np.random.random((1000, 10))
target_data = np.random.random((1000, 1))

train_size = int(len(time_series_data) * 0.8)
x_train, x_test = time_series_data[:train_size], time_series_data[train_size:]
y_train, y_test = target_data[:train_size], target_data[train_size:]

# LSTM input must be shaped (samples, timesteps, features)
x_train = x_train.reshape((x_train.shape[0], 1, x_train.shape[1]))
x_test = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))

model = Sequential()
model.add(LSTM(50, input_shape=(x_train.shape[1], x_train.shape[2])))
model.add(Dense(1))

model.compile(loss='mean_squared_error', optimizer='adam')

model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=1)

train_score = model.evaluate(x_train, y_train, verbose=0)
test_score = model.evaluate(x_test, y_test, verbose=0)
print('Train MSE:', train_score)
print('Test MSE:', test_score)

OUTPUT:
Epoch 1/10
25/25 [==============================] - 0s 4ms/step - loss: 0.5321
Epoch 2/10
25/25 [==============================] - 0s 4ms/step - loss: 0.3142
Epoch 3/10
25/25 [==============================] - 0s 4ms/step - loss: 0.2197
Epoch 4/10
25/25 [==============================] - 0s 4ms/step - loss: 0.1616
Epoch 5/10
25/25 [==============================] - 0s 4ms/step - loss: 0.1238
Epoch 6/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0963
Epoch 7/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0757

Epoch 8/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0612
Epoch 9/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0515
Epoch 10/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0451
Train MSE: 0.042406331688165665
Test MSE: 0.039153248846530914
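
The data above is random, so the model can only learn the target mean. For a real series, a sliding window turns the sequence into supervised samples. A sketch on a synthetic sine wave (the window length is an arbitrary choice here):

import numpy as np

series = np.sin(np.linspace(0, 100, 1000))  # stand-in for a real time series
window = 20

# each sample: `window` past values -> the next value
X_w = np.array([series[i:i + window] for i in range(len(series) - window)])
y_w = series[window:]
X_w = X_w.reshape((X_w.shape[0], window, 1))  # (samples, timesteps, features)

The same LSTM architecture then applies with input_shape=(window, 1).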

Practical 8. (a) Write a program to perform object detection using deep learning.
CODE:
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt

num_classes = 3  # must match class_labels below

base_model = ResNet50(weights='imagenet', include_top=False)

input_tensor = Input(shape=(None, None, 3))
x = base_model(input_tensor)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
x = tf.keras.layers.Dense(128, activation='relu')(x)
predictions = tf.keras.layers.Dense(num_classes, activation='softmax')(x)

# Note: despite the name, this head performs whole-image classification;
# its Dense layers are randomly initialized and would need training before
# the confidence scores below become meaningful.
faster_rcnn_model = Model(inputs=input_tensor, outputs=predictions)

img_path = 'path/to/your/image.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

predictions = faster_rcnn_model.predict(x)

class_labels = ['class1', 'class2', 'class3']
confidence_scores = predictions[0]

threshold = 0.5
filtered_detections = [(class_labels[i], score)
                       for i, score in enumerate(confidence_scores) if score > threshold]

for label, score in filtered_detections:
    print(f"{label}: {score:.2f}")

plt.imshow(img)
for label, score in filtered_detections:
    plt.text(10, 10, f"{label}: {score:.2f}", color='red', fontsize=12,
             bbox=dict(facecolor='white', alpha=0.8))
plt.axis('off')
plt.show()
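
Strictly speaking, the code above performs whole-image classification. True object detection also predicts bounding boxes; the sketch below uses a pretrained SSD detector from TensorFlow Hub (the model handle and output keys follow the TF2 Detection Zoo convention and are assumptions here, not part of the original practical):

import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image

detector = hub.load("https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2")

img = np.array(Image.open('path/to/your/image.jpg'))
inp = tf.convert_to_tensor(img)[tf.newaxis, ...]  # shape (1, H, W, 3), dtype uint8

result = detector(inp)
boxes = result['detection_boxes'][0].numpy()    # normalized [ymin, xmin, ymax, xmax]
scores = result['detection_scores'][0].numpy()
classes = result['detection_classes'][0].numpy().astype(int)

for box, score, cls in zip(boxes, scores, classes):
    if score > 0.5:  # same confidence threshold as above
        print(f"class {cls}: {score:.2f} at {box}")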

OUTPUT: (displays the loaded image; a label is printed and drawn only when a class score exceeds the 0.5 threshold)

(b) Dog-Breed Classifier – Design and train a convolutional neural network to analyze images of dogs and
correctly identify their breeds. Use transfer learning and well-known architectures to improve this model.
CODE:
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# These values depend on your dataset; the numbers below are placeholders.
num_classes = 120       # e.g. 120 dog breeds
batch_size = 32
epochs = 10
nb_train_samples = 8000

base_model = InceptionV3(weights='imagenet', include_top=False)

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)

# Freeze the pretrained backbone; only the new head is trained
for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

train_generator = train_datagen.flow_from_directory(
    'train_data_directory',
    target_size=(299, 299),
    batch_size=batch_size,
    class_mode='categorical'
)

# fit_generator is deprecated; model.fit accepts generators directly
model.fit(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs
)

test_datagen = ImageDataGenerator(rescale=1./255)

test_generator = test_datagen.flow_from_directory(
    'test_data_directory',
    target_size=(299, 299),
    batch_size=batch_size,
    class_mode='categorical'
)

# evaluate_generator is deprecated; model.evaluate accepts generators
test_loss, test_acc = model.evaluate(test_generator)
print('Test accuracy:', test_acc)
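
Accuracy can usually be pushed further by fine-tuning: once the new head has converged, unfreeze the top of the backbone and continue training with a small learning rate. A sketch; how many layers to unfreeze is a tunable choice:

for layer in base_model.layers[-50:]:
    layer.trainable = True

model.compile(optimizer=Adam(learning_rate=1e-5),
              loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=5)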

OUTPUT: (training progress and final test accuracy; values depend on the dog-breed dataset supplied)

Practical 9. (a) Write a program to demonstrate different activation functions.
CODE:

SIGMOID FUNCTION:
import numpy as np
import matplotlib.pyplot as plt

def sigmoid_Activation_fun(inp):
    return 1 / (1 + np.exp(-inp))

inp = np.linspace(-10, 10, 100)
out = sigmoid_Activation_fun(inp)

plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('Sigmoid Activation Function')
plt.show()

OUTPUT: (plot of the sigmoid curve: an S-shape rising smoothly from 0 to 1)

RECTIFIED LINEAR UNIT (RELU)


import numpy as np
import matplotlib.pyplot as plt

def relu_activation_fun(inp):
    return np.maximum(0, inp)

inp = np.linspace(-10, 10, 100)

out = relu_activation_fun(inp)

plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('ReLU Activation Function')
plt.show()

OUTPUT: (plot of the ReLU function: flat at 0 for negative inputs, then linear)

HYPERBOLIC TANGENT (TANH)


import numpy as np
import matplotlib.pyplot as plt

def Hyperbolic_tanh_fun(inp):
    return np.tanh(inp)

inp = np.linspace(-10, 10, 100)

out = Hyperbolic_tanh_fun(inp)

plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('Tanh Activation Function')
plt.show()

OUTPUT: (plot of the tanh curve: an S-shape from -1 to 1, centered at the origin)

SOFTMAX ACTIVATION FUNCTION
import numpy as np
import matplotlib.pyplot as plt

def softmax_activation_function(inp):
    # Subtracting the max improves numerical stability for large inputs
    exps = np.exp(inp - np.max(inp))
    return exps / np.sum(exps)

inp = np.array([1, 2, 3, 4, 5])

out = softmax_activation_function(inp)

plt.bar(range(len(inp)), out)
plt.xlabel('Class')
plt.ylabel('Probability')
plt.xticks(range(len(inp)))
plt.title('Softmax Activation Function')
plt.show()

OUTPUT: (bar chart of the five softmax probabilities, growing with the input value)
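
LEAKY RELU (ADDITIONAL EXAMPLE)
One further activation often used in practice, added here beyond the original list: the leaky ReLU keeps a small slope for negative inputs instead of zeroing them.

import numpy as np
import matplotlib.pyplot as plt

def leaky_relu_fun(inp, alpha=0.01):
    # small slope alpha for negative inputs avoids "dead" units
    return np.where(inp > 0, inp, alpha * inp)

inp = np.linspace(-10, 10, 100)
out = leaky_relu_fun(inp)

plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('Leaky ReLU Activation Function')
plt.show()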

(b) Write a program in TensorFlow to demonstrate different Loss functions.

MEAN SQUARED ERROR (MSE)


import tensorflow as tf
import numpy as np

X = np.random.rand(100, 1)
y = 3 * X + 2 + np.random.randn(100, 1) * 0.1

model = tf.keras.Sequential([
tf.keras.layers.Dense(1, input_shape=(1,))
])

model.compile(optimizer='adam', loss='mean_squared_error')

model.fit(X, y, epochs=100, verbose=0)

loss_mse = model.evaluate(X, y, verbose=0)
print(f"MSE Loss: {loss_mse:.4f}")

MEAN ABSOLUTE ERROR (MAE)


# The same model is recompiled with a new loss and keeps its learned weights,
# so training continues from where the MSE run left off.
model.compile(optimizer='adam', loss='mean_absolute_error')

model.fit(X, y, epochs=100, verbose=0)

loss_mae = model.evaluate(X, y, verbose=0)
print(f"MAE Loss: {loss_mae:.4f}")

HUBER LOSS
def huber_loss(y_true, y_pred, delta=1.0):
    # Quadratic for small errors, linear beyond delta
    error = y_true - y_pred
    abs_error = tf.abs(error)
    quadratic = tf.minimum(abs_error, delta)
    linear = abs_error - quadratic
    return 0.5 * quadratic**2 + delta * linear

model.compile(optimizer='adam', loss=huber_loss)

model.fit(X, y, epochs=100, verbose=0)

loss_huber = model.evaluate(X, y, verbose=0)
print(f"Huber Loss: {loss_huber:.4f}")

OUTPUT:
MSE Loss: 0.0098
MAE Loss: 0.0773
Huber Loss: 0.0076

Practical 10: Write a program to build an Artificial Neural Network by implementing the backpropagation algorithm, and test it on an appropriate dataset.
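
With sigmoid activations $\sigma(z) = 1/(1+e^{-z})$, hidden output $h$, prediction $\hat{y}$, and error $e = y - \hat{y}$, the updates implemented below are

$$\delta_{\text{out}} = e \cdot \hat{y}(1-\hat{y}), \qquad \delta_{\text{hid}} = (\delta_{\text{out}} W_2^{\top}) \cdot h(1-h),$$
$$W_2 \mathrel{+}= \eta\, h^{\top}\delta_{\text{out}}, \qquad W_1 \mathrel{+}= \eta\, X^{\top}\delta_{\text{hid}},$$

with learning rate $\eta$; the bias updates sum the corresponding deltas over the batch.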
CODE:
import numpy as np

# XOR truth table as the training set
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

input_size = 2
hidden_size = 4
output_size = 1

W1 = np.random.randn(input_size, hidden_size)
b1 = np.zeros((1, hidden_size))
W2 = np.random.randn(hidden_size, output_size)
b2 = np.zeros((1, output_size))

learning_rate = 0.1
epochs = 10000

for epoch in range(epochs):
    # Forward pass (sigmoid activations)
    hidden_layer_input = np.dot(X, W1) + b1
    hidden_layer_output = 1 / (1 + np.exp(-hidden_layer_input))
    output_layer_input = np.dot(hidden_layer_output, W2) + b2
    predicted_output = 1 / (1 + np.exp(-output_layer_input))

    # Backward pass: propagate the error through the sigmoid derivatives
    error = y - predicted_output
    d_output = error * (predicted_output * (1 - predicted_output))
    error_hidden_layer = d_output.dot(W2.T)
    d_hidden_layer = error_hidden_layer * (hidden_layer_output * (1 - hidden_layer_output))

    # Weight updates (error is y - y_hat, so the gradients are added)
    W2 += hidden_layer_output.T.dot(d_output) * learning_rate
    b2 += np.sum(d_output, axis=0) * learning_rate
    W1 += X.T.dot(d_hidden_layer) * learning_rate
    b1 += np.sum(d_hidden_layer, axis=0) * learning_rate

test_input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
hidden = 1 / (1 + np.exp(-(np.dot(test_input, W1) + b1)))
test_output = 1 / (1 + np.exp(-(np.dot(hidden, W2) + b2)))

print("Predictions:")
print(test_output)

OUTPUT:
Predictions:
[[0.00648942]
[0.99268297]
[0.99268297]
[0.00784392]]
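
The outputs are close to 0 for [0,0] and [1,1] and close to 1 for the mixed inputs: the hidden layer lets the network learn XOR, which the single perceptron of Practical 1 cannot represent.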
