Deep Learning
PRACTICAL FILE
Subject Name:- DEEP LEARNING WITH PYTHON
Subject Code:- BCAP 314
Practical 1: Write a program to implement a single-layer perceptron.
CODE:
import numpy as np

class Perceptron:
    def __init__(self, input_size, learning_rate=0.01, epochs=100):
        self.input_size = input_size
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = np.zeros(input_size + 1)  # initialize weights (including bias)

    def predict(self, x):
        # Weighted sum plus bias, passed through a step activation
        summation = np.dot(x, self.weights[1:]) + self.weights[0]
        return 1 if summation > 0 else 0

    def train(self, X, y):
        for _ in range(self.epochs):
            for i in range(X.shape[0]):
                prediction = self.predict(X[i])
                error = y[i] - prediction
                self.weights[1:] += self.learning_rate * error * X[i]
                self.weights[0] += self.learning_rate * error

if __name__ == "__main__":
    # Training data for the logical AND function
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y_train = np.array([0, 0, 0, 1])
    perceptron = Perceptron(input_size=2)
    perceptron.train(X_train, y_train)
    print("Predictions:", [perceptron.predict(x) for x in X_train])
OUTPUT:
Practical 2: Write a program to implement a multi-layer perceptron (MLP) using TensorFlow. Apply the MLP to the Iris dataset.
CODE:
import numpy as np
import tensorflow as tf  # TensorFlow 1.x-style API (placeholders/sessions); use tf.compat.v1 on TF 2.x
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
iris = load_iris()
X = iris.data
y = iris.target.reshape(-1, 1)
onehot_encoder = OneHotEncoder(sparse=False)  # on scikit-learn >= 1.2 use sparse_output=False
y = onehot_encoder.fit_transform(y)

# Hold out 20% of the data for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

learning_rate = 0.001
training_epochs = 1000
display_step = 50
n_input = X_train.shape[1]
n_hidden_1 = 10
n_hidden_2 = 10
n_classes = y_train.shape[1]
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
def multilayer_perceptron(x):
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    output_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return output_layer
# Placeholders for the input features and one-hot encoded labels
X_placeholder = tf.placeholder("float", [None, n_input])
y_placeholder = tf.placeholder("float", [None, n_classes])

logits = multilayer_perceptron(X_placeholder)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=logits, labels=y_placeholder))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
init = tf.global_variables_initializer()
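# The training session itself is missing from the original listing; the lines
# below are a minimal TF1-style sketch of that step (the epoch count and the
# logging format follow the output shown further down).
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y_placeholder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, "float"))

sess = tf.Session()
sess.run(init)
for epoch in range(1, training_epochs + 1):
    _, loss, acc = sess.run([train_op, loss_op, accuracy],
                            feed_dict={X_placeholder: X_train, y_placeholder: y_train})
    if epoch % display_step == 0:
        print("Epoch %d, Loss= %.4f, Training Accuracy= %.3f" % (epoch, loss, acc))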
print("Optimization Finished!")
OUTPUT:
Epoch 50, Loss= 1.5843, Training Accuracy= 0.350
Epoch 100, Loss= 1.2712, Training Accuracy= 0.350
Epoch 150, Loss= 1.0546, Training Accuracy= 0.350
Epoch 200, Loss= 0.8949, Training Accuracy= 0.367
Epoch 250, Loss= 0.7794, Training Accuracy= 0.600
Epoch 300, Loss= 0.6998, Training Accuracy= 0.675
Epoch 350, Loss= 0.6430, Training Accuracy= 0.708
Epoch 400, Loss= 0.5990, Training Accuracy= 0.750
Epoch 450, Loss= 0.5617, Training Accuracy= 0.792
Epoch 500, Loss= 0.5288, Training Accuracy= 0.808
Epoch 550, Loss= 0.4987, Training Accuracy= 0.833
Epoch 600, Loss= 0.4704, Training Accuracy= 0.850
Epoch 650, Loss= 0.4433, Training Accuracy= 0.867
Epoch 700, Loss= 0.4167, Training Accuracy= 0.875
Epoch 750, Loss= 0.3899, Training Accuracy= 0.875
Epoch 800, Loss= 0.3638, Training Accuracy= 0.883
Epoch 850, Loss= 0.3386, Training Accuracy= 0.883
Epoch 900, Loss= 0.3146, Training Accuracy= 0.892
Epoch 950, Loss= 0.2924, Training Accuracy= 0.900
Epoch 1000, Loss= 0.2719, Training Accuracy= 0.900
Optimization Finished!
Testing Accuracy: 0.93333334
Practical 3. (a) Write a program to implement a Convolutional Neural Network (CNN) in Keras. Perform predictions using the trained Convolutional Neural Network (CNN).
CODE:
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.datasets import mnist
from keras.utils import to_categorical

# Load MNIST, add a channel dimension and scale pixels to [0, 1]
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=5, validation_data=(X_test, y_test))  # batch size assumed
test_loss, test_acc = model.evaluate(X_test, y_test)
print("Test Loss:", test_loss)
print("Test Accuracy:", test_acc)

predictions = model.predict(X_test[:10])
predicted_classes = np.argmax(predictions, axis=1)
print("Predicted classes:", predicted_classes)
OUTPUT:
Epoch 1/5
60000/60000 [==============================] - 44s 738us/step - loss: 0.2464 - accuracy:
0.9271 - val_loss: 0.0839 - val_accuracy: 0.9737
Epoch 2/5
60000/60000 [==============================] - 42s 701us/step - loss: 0.0701 - accuracy:
0.9788 - val_loss: 0.0469 - val_accuracy: 0.9841
Epoch 3/5
60000/60000 [==============================] - 42s 701us/step - loss: 0.0490 - accuracy:
0.9852 - val_loss: 0.0382 - val_accuracy: 0.9873
Epoch 4/5
60000/60000 [==============================] - 42s 700us/step - loss: 0.0376 - accuracy:
0.9883 - val_loss: 0.0320 - val_accuracy: 0.9895
Epoch 5/5
60000/60000 [==============================] - 42s 700us/step - loss: 0.0310 - accuracy:
0.9903 - val_loss: 0.0334 - val_accuracy: 0.9887
10000/10000 [==============================] - 3s 306us/step
Test Loss: 0.03337173608099645
Test Accuracy: 0.988700032711029
Predicted classes: [7 2 1 0 4 1 4 9 6 9]
(b) Write a program to build an Image Classifier with CIFAR-10 Data.
CODE:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# Load CIFAR-10, scale pixels to [0, 1] and one-hot encode the labels
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=64, validation_split=0.1)
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Test accuracy:", test_acc)
OUTPUT:
Epoch 1/10
704/704 [==============================] - 51s 71ms/step - loss: 1.5155 - accuracy: 0.4481
- val_loss: 1.2701 - val_accuracy: 0.5476
Epoch 2/10
704/704 [==============================] - 50s 70ms/step - loss: 1.1580 - accuracy: 0.5912
- val_loss: 1.0725 - val_accuracy: 0.6214
Epoch 3/10
704/704 [==============================] - 50s 70ms/step - loss: 1.0043 - accuracy: 0.6476
- val_loss: 0.9917 - val_accuracy: 0.6570
Epoch 4/10
704/704 [==============================] - 49s 70ms/step - loss: 0.9035 - accuracy: 0.6827
- val_loss: 0.9283 - val_accuracy: 0.6818
Epoch 5/10
704/704 [==============================] - 50s 71ms/step - loss: 0.8346 - accuracy: 0.7056
- val_loss: 0.9001 - val_accuracy: 0.6888
Epoch 6/10
704/704 [==============================] - 50s 71ms/step - loss: 0.7775 - accuracy: 0.7273
- val_loss: 0.9134 - val_accuracy: 0.6930
Epoch 7/10
704/704 [==============================] - 50s 71ms/step - loss: 0.7275 - accuracy: 0.7440
- val_loss: 0.8881 - val_accuracy: 0.7000
Epoch 8/10
704/704 [==============================] - 49s 70ms/step - loss: 0.6845 - accuracy: 0.7593
- val_loss: 0.8662 - val_accuracy: 0.7124
Epoch 9/10
704/704 [==============================] - 50s 71ms/step - loss: 0.6413 - accuracy: 0.7755
- val_loss: 0.8911 - val_accuracy: 0.7080
Epoch 10/10
704/704 [==============================] - 50s 71ms/step - loss: 0.6027 - accuracy: 0.7880
- val_loss: 0.9071 - val_accuracy: 0.7078
313/313 [==============================] - 3s 10ms/step - loss: 0.9215 - accuracy: 0.7031
Test accuracy: 0.7031000256538391
Practical 4. a) Write a program to perform face detection using a CNN.
CODE:
from keras.preprocessing.image import ImageDataGenerator
TrainingImagePath = '/path/to/training/images'
train_datagen = ImageDataGenerator(
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True
)
test_datagen = ImageDataGenerator()
training_set = train_datagen.flow_from_directory(
TrainingImagePath,
target_size=(64, 64),
batch_size=32,
class_mode='categorical'
)
test_set = test_datagen.flow_from_directory(
TrainingImagePath,
target_size=(64, 64),
batch_size=32,
class_mode='categorical'
)
print(test_set.class_indices)
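# The listing above only prepares the data generators; a small CNN classifier
# that could be trained on them might look like the following sketch (layer
# sizes and the epoch count are illustrative, not taken from the original).
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(128, activation='relu'))
classifier.add(Dense(len(training_set.class_indices), activation='softmax'))
classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# On very old standalone Keras versions use fit_generator instead of fit
classifier.fit(training_set, epochs=10, validation_data=test_set)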
OUTPUT:
b) Write a program to demonstrate hyperparameter tuning in a CNN.
CODE:
import numpy as np
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.datasets import mnist
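# The original listing stops at the imports; the following is a minimal sketch
# of the missing tuning step. The grid covers the parameters reported in the
# output below; the exact grid values, epochs and batch size are assumptions.
(X_train, y_train), _ = mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0

def build_model(activation='relu', kernel_size=(3, 3), pool_size=(2, 2), optimizer='adam'):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=kernel_size, activation=activation, input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

classifier = KerasClassifier(build_fn=build_model, epochs=3, batch_size=128, verbose=0)
param_grid = {
    'activation': ['relu', 'tanh'],
    'kernel_size': [(3, 3), (5, 5)],
    'pool_size': [(2, 2)],
    'optimizer': ['adam', 'sgd'],
}
grid = GridSearchCV(estimator=classifier, param_grid=param_grid, cv=3)
grid_result = grid.fit(X_train, y_train)
print("Best Accuracy:", grid_result.best_score_)
print("Best Parameters:", grid_result.best_params_)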
OUTPUT:
Best Accuracy: 0.9832666708628349
Best Parameters: {'activation': 'relu', 'kernel_size': (3, 3), 'optimizer': 'adam',
'pool_size': (2, 2)}
(c) Predicting Bike-Sharing Patterns – Build and train neural networks from scratch to predict the number of
bike share users on a given day.
CODE:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
bike_data = pd.read_csv('bike_sharing_data.csv')
X = bike_data.drop(columns=['cnt'])
y = bike_data['cnt']

# Split into train and test sets before scaling (assumes the feature columns are numeric)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(X_train.shape[1],)))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train_scaled, y_train, epochs=50, batch_size=32, validation_split=0.2)  # batch size assumed
test_loss = model.evaluate(X_test_scaled, y_test)
print("Test Loss:", test_loss)

predictions = model.predict(X_test_scaled)
OUTPUT:
Epoch 1/50
584/584 [==============================] - 1s 1ms/step - loss: 19278.2168 - val_loss:
10131.5879
Epoch 2/50
584/584 [==============================] - 1s 1ms/step - loss: 8279.4697 - val_loss:
6395.8965
...
Epoch 50/50
584/584 [==============================] - 1s 1ms/step - loss: 3031.0266 - val_loss:
3017.9963
152/152 [==============================] - 0s 927us/step - loss: 3044.9084
Test Loss: 3044.908447265625
Practical 5. Write a program to build an auto-encoder in Keras.
CODE:
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
import numpy as np
encoding_dim = 32
input_img = Input(shape=(784,))

# Encoder and decoder layers, tied together into the autoencoder model
encoded = Dense(encoding_dim, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Load MNIST, scale to [0, 1] and flatten each 28x28 image to a 784-dim vector
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train = x_train.reshape((len(x_train), 784))
x_test = x_test.reshape((len(x_test), 784))

autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
decoded_imgs = autoencoder.predict(x_test)  # reconstructed test images
OUTPUT:
Epoch 1/50
235/235 [==============================] - 2s 6ms/step - loss: 0.2814 - val_loss: 0.1831
Epoch 2/50
235/235 [==============================] - 1s 5ms/step - loss: 0.1664 - val_loss: 0.1479
Epoch 3/50
235/235 [==============================] - 1s 5ms/step - loss: 0.1411 - val_loss: 0.1325
...
Epoch 48/50
235/235 [==============================] - 1s 5ms/step - loss: 0.0997 - val_loss: 0.0992
Epoch 49/50
235/235 [==============================] - 1s 5ms/step - loss: 0.0997 - val_loss: 0.0992
Epoch 50/50
235/235 [==============================] - 1s 5ms/step - loss: 0.0997 - val_loss: 0.0991
Practical 6. Write a program to implement basic reinforcement learning algorithm to teach a bot to reach its
destination.
CODE:
import numpy as np
# 0: Empty cell, 1: Obstacle, 2: Destination
environment = np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 2]
])
Q = np.zeros((5, 5, 4))
alpha = 0.1
gamma = 0.9
epsilon = 0.1
# Moves: up, down, left, right
actions = [(-1, 0), (1, 0), (0, -1), (0, 1)]

for _ in range(1000):
    state = (0, 0)
    while True:
        # Epsilon-greedy action selection
        if np.random.rand() < epsilon:
            action = np.random.choice(4)
        else:
            action = np.argmax(Q[state[0], state[1], :])
        # Take the action, clipping to stay inside the 5x5 grid
        next_state = (min(max(state[0] + actions[action][0], 0), 4),
                      min(max(state[1] + actions[action][1], 0), 4))
        # Reward scheme (assumed, not given in the original listing):
        # +100 for reaching the destination, -10 for an obstacle, -1 per step
        if environment[next_state[0], next_state[1]] == 2:
            reward = 100
        elif environment[next_state[0], next_state[1]] == 1:
            reward = -10
        else:
            reward = -1
        # Q-learning update rule
        Q[state[0], state[1], action] += alpha * (
            reward + gamma * np.max(Q[next_state[0], next_state[1], :])
            - Q[state[0], state[1], action])
        state = next_state
        if environment[state[0], state[1]] == 2:
            break
state = (0, 0)
path = [(0, 0)]
while True:
    action = np.argmax(Q[state[0], state[1], :])
    next_state = (state[0] + actions[action][0], state[1] + actions[action][1])
    path.append(next_state)
    state = next_state
    if environment[state[0], state[1]] == 2:
        break

print("Optimal path:")
for p in path:
    print(p)
OUTPUT:
Optimal path:
(0, 0)
(1, 0)
(2, 0)
(2, 1)
(2, 2)
(3, 2)
(4, 2)
(4, 3)
(4, 4)
Practical 7. (a) Write a program to implement a Recurrent Neural Network
CODE:
import numpy as np
from keras.models import Sequential
from keras.layers import SimpleRNN, Dense

data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32
model = Sequential()
model.add(SimpleRNN(32, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Random dummy data just to exercise the network (the near-chance accuracy in
# the output below reflects that these labels carry no signal)
x_train = np.random.random((320, timesteps, data_dim))
y_train = np.random.random((320, num_classes))
model.fit(x_train, y_train, batch_size=batch_size, epochs=5)
OUTPUT:
Epoch 1/5
320/320 [==============================] - 2s 6ms/step - loss: 11.8621 - accuracy: 0.0975
Epoch 2/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7602 - accuracy: 0.0972
Epoch 3/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7543 - accuracy: 0.0986
Epoch 4/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7512 - accuracy: 0.0981
Epoch 5/5
320/320 [==============================] - 2s 6ms/step - loss: 11.7487 - accuracy: 0.0980
(b) Write a program to implement LSTM and perform time series analysis using LSTM.
CODE:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
import numpy as np
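# The original listing does not show how the training data are built; the block
# below is an assumed preparation: a noisy sine wave cut into sliding windows
# of 10 time steps, with an 80/20 train/test split.
series = np.sin(np.arange(0, 100, 0.1)) + np.random.normal(0, 0.1, 1000)
window = 10
X_windows = np.array([series[i:i + window] for i in range(len(series) - window)])
y_targets = series[window:]
X_windows = X_windows.reshape(-1, window, 1)
split = int(0.8 * len(X_windows))
x_train, y_train = X_windows[:split], y_targets[:split]
x_test, y_test = X_windows[split:], y_targets[split:]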
model = Sequential()
model.add(LSTM(50, input_shape=(x_train.shape[1], x_train.shape[2])))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=10, batch_size=32)
print("Train MSE:", model.evaluate(x_train, y_train, verbose=0))
print("Test MSE:", model.evaluate(x_test, y_test, verbose=0))
OUTPUT:
Epoch 1/10
25/25 [==============================] - 0s 4ms/step - loss: 0.5321
Epoch 2/10
25/25 [==============================] - 0s 4ms/step - loss: 0.3142
Epoch 3/10
25/25 [==============================] - 0s 4ms/step - loss: 0.2197
Epoch 4/10
25/25 [==============================] - 0s 4ms/step - loss: 0.1616
Epoch 5/10
25/25 [==============================] - 0s 4ms/step - loss: 0.1238
Epoch 6/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0963
Epoch 7/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0757
Epoch 8/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0612
Epoch 9/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0515
Epoch 10/10
25/25 [==============================] - 0s 4ms/step - loss: 0.0451
Train MSE: 0.042406331688165665
Test MSE: 0.039153248846530914
Practical 8: a) Write a program to perform object detection using Deep Learning
CODE:
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
img_path = 'path/to/your/image.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# NOTE: 'faster_rcnn_model', 'class_labels' and 'confidence_scores' are not
# defined anywhere in this listing; they are assumed to come from a pre-trained
# object-detection model (e.g. a Faster R-CNN) loaded separately.
predictions = faster_rcnn_model.predict(x)
threshold = 0.5
filtered_detections = [(class_labels[i], score)
                       for i, score in enumerate(confidence_scores) if score > threshold]
plt.imshow(img)
for label, score in filtered_detections:
    plt.text(10, 10, f"{label}: {score:.2f}", color='red', fontsize=12,
             bbox=dict(facecolor='white', alpha=0.8))
plt.axis('off')
plt.show()
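# Since no detection model is defined in the listing, here is a runnable
# stand-in that uses only the imports shown: the pre-trained ResNet50 classifies
# the whole image (note: this is image classification, not object localization).
from tensorflow.keras.applications.resnet50 import decode_predictions

resnet = ResNet50(weights='imagenet')
class_predictions = resnet.predict(x)
for _, label, score in decode_predictions(class_predictions, top=3)[0]:
    print(f"{label}: {score:.2f}")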
OUTPUT:
(b) Dog-Breed Classifier – Design and train a convolutional neural network to analyze images of dogs and
correctly identify their breeds. Use transfer learning and well-known architectures to improve this model.
CODE:
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Values below are assumed for illustration (the original listing does not define them)
num_classes = 120   # e.g. 120 breeds in the Stanford Dogs dataset
batch_size = 32
epochs = 10

# InceptionV3 pre-trained on ImageNet, without its classification head
base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(299, 299, 3))

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)

# Freeze the pre-trained layers so that only the new classification head is trained
model = Model(inputs=base_model.input, outputs=predictions)
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest'
)
train_generator = train_datagen.flow_from_directory(
'train_data_directory',
target_size=(299, 299),
batch_size=batch_size,
class_mode='categorical'
)
nb_train_samples = train_generator.samples
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs
)
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'test_data_directory',
target_size=(299, 299),
batch_size=batch_size,
class_mode='categorical'
)
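# The listing builds a test generator but never uses it; an evaluation step
# might look like this (a sketch, not part of the original):
score = model.evaluate_generator(test_generator, steps=test_generator.samples // batch_size)
print("Test accuracy:", score[1])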
OUTPUT:
Practical 9. a) Write a program to demonstrate different activation functions.
CODE:
SIGMOID FUNCTION:
import numpy as np
import matplotlib.pyplot as plt
def sigmoid_Activation_fun(inp):
    return 1 / (1 + np.exp(-inp))

# Sample inputs over a symmetric range
inp = np.linspace(-10, 10, 100)
out = sigmoid_Activation_fun(inp)

plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('Sigmoid Activation Function')
plt.show()
OUTPUT:
RELU ACTIVATION FUNCTION:
def relu_activation_fun(inp):
    return np.maximum(0, inp)

out = relu_activation_fun(inp)
plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('ReLU Activation Function')
plt.show()
OUTPUT:
TANH ACTIVATION FUNCTION:
def Hyperbolic_tanh_fun(inp):
    return np.tanh(inp)

out = Hyperbolic_tanh_fun(inp)
plt.plot(inp, out)
plt.xlabel('Input')
plt.ylabel('Output')
plt.title('Tanh Activation Function')
plt.show()
OUTPUT:
SOFTMAX ACTIVATION FUNCTION
import numpy as np
import matplotlib.pyplot as plt
def softmax_activation_function(inp):
    exps = np.exp(inp - np.max(inp))  # subtract the max for numerical stability
    return exps / np.sum(exps)

# Example class scores (logits); values are illustrative
inp = np.array([2.0, 1.0, 0.1, 3.0])
out = softmax_activation_function(inp)
plt.bar(range(len(inp)), out)
plt.xlabel('Class')
plt.ylabel('Probability')
plt.xticks(range(len(inp)))
plt.title('Softmax Activation Function')
plt.show()
OUTPUT:
(b) Write a program in TensorFlow to demonstrate different Loss functions.
CODE:
import numpy as np
import tensorflow as tf

MSE LOSS
# Linear data: y = 3x + 2 plus Gaussian noise
X = np.random.rand(100, 1)
y = 3 * X + 2 + np.random.randn(100, 1) * 0.1
model = tf.keras.Sequential([
tf.keras.layers.Dense(1, input_shape=(1,))
])
model.compile(optimizer='adam', loss='mean_squared_error')
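# The original listing jumps from compiling the MSE model straight to the Huber
# loss; the training/evaluation calls and the MAE section below are an assumed
# completion that mirrors the printed output.
model.fit(X, y, epochs=100, verbose=0)
print("MSE Loss:", model.evaluate(X, y, verbose=0))

MAE LOSS
model.compile(optimizer='adam', loss='mean_absolute_error')
model.fit(X, y, epochs=100, verbose=0)
print("MAE Loss:", model.evaluate(X, y, verbose=0))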
HUBER LOSS
def huber_loss(y_true, y_pred, delta=1.0):
    # Quadratic for small errors, linear for large ones
    error = y_true - y_pred
    small_error = tf.abs(error) <= delta
    squared_loss = 0.5 * tf.square(error)
    linear_loss = delta * (tf.abs(error) - 0.5 * delta)
    return tf.where(small_error, squared_loss, linear_loss)

model.compile(optimizer='adam', loss=huber_loss)
model.fit(X, y, epochs=100, verbose=0)
print("Huber Loss:", model.evaluate(X, y, verbose=0))
OUTPUT:
MSE Loss: 0.0098
MAE Loss: 0.0773
Huber Loss: 0.0076
Practical 10: Write a program to build an Artificial Neural Network by implementing the Backpropagation algorithm, and test it using an appropriate dataset.
CODE:
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# XOR truth table as the training set
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

input_size = 2
hidden_size = 4
output_size = 1
W1 = np.random.randn(input_size, hidden_size)
b1 = np.zeros((1, hidden_size))
W2 = np.random.randn(hidden_size, output_size)
b2 = np.zeros((1, output_size))
learning_rate = 0.1
epochs = 10000
for epoch in range(epochs):
    # Forward pass
    hidden_layer_output = sigmoid(np.dot(X, W1) + b1)
    predicted_output = sigmoid(np.dot(hidden_layer_output, W2) + b2)

    # Backpropagation of the error
    error = y - predicted_output
    d_output = error * (predicted_output * (1 - predicted_output))
    error_hidden_layer = d_output.dot(W2.T)
    d_hidden_layer = error_hidden_layer * (hidden_layer_output * (1 - hidden_layer_output))

    # Weight and bias updates
    W2 += hidden_layer_output.T.dot(d_output) * learning_rate
    b2 += np.sum(d_output, axis=0) * learning_rate
    W1 += X.T.dot(d_hidden_layer) * learning_rate
    b1 += np.sum(d_hidden_layer, axis=0) * learning_rate
print("Predictions:")
print(test_output)
OUTPUT:
Predictions:
[[0.00648942]
[0.99268297]
[0.99268297]
[0.00784392]]