Experiment 1
Aim: Setting up the Jupyter IDE Environment and Executing a Python Program
6. This step is optional. If you did not check the box in step 5 and now want to add Anaconda to your PATH, add it under the environment variables.
7. Find and open the Anaconda Prompt app using the search bar.
8. Once the Anaconda Prompt app opens, navigate to the desired folder, using the cd command.
9. Once in the desired folder, type jupyter notebook and press the Enter key.
10. The Jupyter server will start. You should see some server logs printed. You may be prompted
to select an application to open Jupyter in. Firefox or Chrome are preferred.
11. Shortly after, a browser window should open, showing the files and folders located in the
folder where you started the Jupyter server.
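The Python program executed in the notebook (a palindrome check, per the result below) is not reproduced in the record; a minimal sketch along these lines could be used:
num = input("Enter a number: ")      # read the number as a string
if num == num[::-1]:                 # a palindrome reads the same forwards and backwards
    print(num, "is a palindrome")
else:
    print(num, "is not a palindrome")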
Result: We have successfully installed Anaconda, set up the Jupyter IDE, and executed a Python program to check whether an input number is a palindrome or not.
Experiment 2
Aim: Installing the TensorFlow and PyTorch libraries and making use of them
TensorFlow with conda is supported on 64-bit Windows 7 or later, 64-bit Ubuntu Linux 14.04 or later, 64-bit CentOS Linux 6 or later, and macOS 10.10 or later.
1. On Windows, open the Start menu and open the Anaconda Prompt.
2. Choose a name for your TensorFlow environment, such as "tf".
3. To install the current release of CPU-only TensorFlow (recommended for beginners):
conda create -n tf tensorflow
conda activate tf
4. Or, to install the current release of GPU TensorFlow on Linux or Windows:
conda create -n tf-gpu tensorflow-gpu
conda activate tf-gpu
5. Now go to Anaconda Navigator and change the environment to tf-gpu from base.
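The installation can then be verified from the activated environment; the version string and package details recorded below were presumably obtained with commands along these lines:
python -c "import tensorflow as tf; print(tf.__version__)"
pip show keras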
2.6.0
Name: keras
Version: 2.13.1
Summary: Deep learning for humans.
Home-page: https://keras.io/
Author: Keras team
Author-email: keras [email protected]
License: Apache 2.0
Location: c:\users\mgit\anaconda3\envs\tf-gpu\lib\site-packages
Requires:
Required-by: tensorflow
import tensorflow as tf
x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
print(x)
print(x.shape)
print(x.dtype)
import torch
x = torch.rand(5, 3)
print(x)
Result: We have successfully installed TensorFlow, Keras, and PyTorch and executed simple programs with them.
Experiment 3
Aim: Training a neural network model to classify images of cats and dogs
import os
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
import PIL
from PIL import Image
# Resize the training cat images to 112x112; the loop body appears to have been cut off
# in the record and is assumed to mirror the test-folder loops below
f = r'C:\Users\MGIT\Desktop\cd dataset\train\cat'
for file in os.listdir(f):
    f_img = f + "/" + file
    img = Image.open(f_img)
    img = img.resize((112, 112))
    img.save(f_img)
import PIL
import os
import os.path
from PIL import Image
# Resize the test dog images to 112x112
f = r'C:\Users\MGIT\Desktop\cd dataset\test\dog'
for file in os.listdir(f):
    f_img = f + "/" + file
    img = Image.open(f_img)
    img = img.resize((112, 112))
    img.save(f_img)
import PIL
import os
import os.path
from PIL import Image
# Resize the test cat images to 112x112
f = r'C:\Users\MGIT\Desktop\cd dataset\test\cat'
for file in os.listdir(f):
    f_img = f + "/" + file
    img = Image.open(f_img)
    img = img.resize((112, 112))
    img.save(f_img)
Image Preprocessing
IMAGE_SIZE = 112
BATCH_SIZE = 32
train_data_size = 180
test_data = 20
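The data generators that feed the model are not shown in the record; a sketch using ImageDataGenerator and the resized cat/dog folders from the previous step (the exact directory layout is assumed) could be:
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
# paths assumed to be the parent folders of the cat/dog subfolders resized above
train_set = train_gen.flow_from_directory(r'C:\Users\MGIT\Desktop\cd dataset\train',
    target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, class_mode='binary')
test_set = test_gen.flow_from_directory(r'C:\Users\MGIT\Desktop\cd dataset\test',
    target_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE, class_mode='binary')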
Model Building
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(112, 112, 3)),
    MaxPool2D(2, 2),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPool2D(2, 2),
    Flatten(),
    Dense(100, activation='relu'),
    Dense(1, activation='sigmoid')
])
model.summary()
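The compile and fit calls are not shown; with the binary sigmoid output above and the generators sketched earlier, they would plausibly be:
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_set, validation_data=test_set, epochs=10)   # epoch count is illustrative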
Result: Trained a neural network model to classify images of dogs and cats.
Experiment 4
Aim: Image Classification on MNIST dataset (CNN model with Fully connected
Layer)
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
image_size = 64
batch_size = 32
train = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255,
rotation_range=90, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
train_set = train.flow_from_directory(r'./dataset_mnist/train',
    target_size=(image_size, image_size), batch_size=batch_size,
    class_mode='categorical')
Model Building
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(image_size, image_size, 3)),  # match the 64x64 generator images
    MaxPool2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPool2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPool2D(2, 2),
    Flatten(),
    Dense(100, activation='relu'),
    Dense(10, activation='softmax')   # 10 output classes for MNIST with class_mode='categorical'
])
model.summary()
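The test-set flow and the training calls are not shown in the record; a sketch consistent with class_mode='categorical' and the 10-way softmax output is (the test directory path is assumed to mirror the train folder):
test_set = test.flow_from_directory(r'./dataset_mnist/test',   # assumed path
    target_size=(image_size, image_size), batch_size=batch_size, class_mode='categorical')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_set, validation_data=test_set, epochs=10)      # epoch count is illustrative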
Experiment 5
Aim: Applying the pre-trained model VGG16 for MNIST Dataset Classification
image_size = 64
batch_size = 32
train = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255,
rotation_range=90, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
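The record does not show how the pre-trained VGG16 base is attached before the compile step below; a typical transfer-learning sketch, assuming 10 MNIST classes and the 64x64 RGB images produced by the generators above, is:
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense

base = VGG16(weights='imagenet', include_top=False, input_shape=(image_size, image_size, 3))
base.trainable = False   # freeze the pre-trained convolutional base
model = Sequential([
    base,
    Flatten(),
    Dense(100, activation='relu'),
    Dense(10, activation='softmax')   # 10 MNIST classes
])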
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
Experiment 6
Aim: Training a Sentiment Analysis model on the IMDB dataset using an RNN with LSTM nodes
Importing The Libraries
import numpy as np
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.layers import Dropout, Dense, Embedding, LSTM
from keras.datasets import imdb
from keras.callbacks import EarlyStopping
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import re
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
Loading Datasets
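The loading code itself is not reproduced here; the variables used below (x_train, y_train, reverse_word_index) are consistent with the standard Keras IMDB loader, for example:
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)
word_index = imdb.get_word_index()
reverse_word_index = {value: key for (key, value) in word_index.items()}   # map integer ids back to words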
Preprocessing Data
def preprocess_text(text):
    text = re.sub(r'<[^>]+>', '', text)    # strip HTML tags
    text = re.sub(r'\d+', '', text)        # strip digits
    text = re.sub(r'[^\w\s]', '', text)    # strip punctuation
    text = text.lower()
    stop_words = set(stopwords.words('english'))
    words = text.split()
    words = [word for word in words if word.lower() not in stop_words]
    lemmatizer = WordNetLemmatizer()
    words = [lemmatizer.lemmatize(word) for word in words]
    return ' '.join(words)
x_train_text = [' '.join([reverse_word_index.get(i - 3, '?') for i in sequence]) for sequence in x_train]
x_test_text = [' '.join([reverse_word_index.get(i - 3, '?') for i in sequence]) for sequence in x_test]
x_train_text = [preprocess_text(text) for text in x_train_text]
x_test_text = [preprocess_text(text) for text in x_test_text]
maxlen = 200
tokenizer = Tokenizer(num_words=10000)
tokenizer.fit_on_texts(x_train_text)
x_train_seq = tokenizer.texts_to_sequences(x_train_text)
x_test_seq = tokenizer.texts_to_sequences(x_test_text)
# padding step assumed; the fit call below expects fixed-length arrays
x_train = pad_sequences(x_train_seq, maxlen=maxlen)
x_test = pad_sequences(x_test_seq, maxlen=maxlen)
y_train = np.array(y_train)
y_test = np.array(y_test)
n_unique_words = 10000
model = Sequential()
model.add(Embedding(n_unique_words, 64, input_length=maxlen))
model.add(LSTM(32))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Loss vs Accuracy')
plt.xlabel('Epoch')
plt.legend(['Loss', 'Val_Loss', 'Accuracy', 'Val_Accuracy'], loc='upper right')   # order matches the plot calls above
plt.show()
sample_text = "This is a great movie with fantastic performances!"
sample_text = preprocess_text(sample_text)
tokenized_sample = tokenizer.texts_to_sequences([sample_text])
padded_sample = pad_sequences(tokenized_sample, maxlen=maxlen)
prediction = model.predict(padded_sample)
threshold = 0.5
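The threshold is then presumably used to turn the sigmoid score into a label, for example:
print("Positive review" if prediction[0][0] >= threshold else "Negative review")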
Output: Trained a sentiment analysis model on the IMDB dataset using RNN layers with LSTM nodes and made predictions on sample text.
Experiment 7
Aim: Applying autoencoder algorithms for encoding real-world data.
Model Architecture
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

encoding_dim = 15          # size of the compressed representation
input_img = Input(shape=(784,))
encoded = Dense(encoding_dim, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
Model Compilation
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
Data Preparation
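The preparation code is not shown, but the shapes printed below correspond to MNIST images flattened into 784-dimensional vectors, along these lines:
from tensorflow.keras.datasets import mnist
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), 784))   # (60000, 784)
x_test = x_test.reshape((len(x_test), 784))      # (10000, 784)
print(x_train.shape)
print(x_test.shape)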
Output: (60000,784)
(10000,784)
Model Fitting
autoencoder.fit(
x_train, x_train,
epochs=15,
batch_size=256,
validation_data=(x_test, x_test)
)
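The encoder and decoder used for visualisation below are not defined in the record; one common way to derive them from the trained autoencoder (reusing the Input and Model classes imported above) is:
encoder = Model(input_img, encoded)                       # maps an input image to its 15-dim code
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]                    # the final Dense(784) layer
decoder = Model(encoded_input, decoder_layer(encoded_input))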
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 6))
encoded_img = encoder.predict(x_test)
decoded_img = decoder.predict(encoded_img)
import random
i = random.randint(0, 10)
print("Original image")
ax = plt.subplot(3, 1, 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
print("Encoded image")
encoded_image = encoded_img[i].reshape(encoding_dim, 1)
ax = plt.subplot(3, 1, 2)
plt.imshow(encoded_image, aspect=0.05)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
Output: Applied the autoencoder algorithm on the MNIST dataset and displayed the original, encoded, and decoded images.
Experiment 8
Aim: Applying Generative Adversarial Networks for image generation and unsupervised tasks
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from IPython import display
Loading Datasets
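The loading step is not reproduced; train_images used below is consistent with MNIST digits reshaped to 28x28x1 and scaled to [-1, 1] (to match a tanh generator output), for example:
(train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5   # scale pixel values to [-1, 1]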
BUFFER_SIZE = 10000
BATCH_SIZE = 128
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
Generator Model
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # The remaining upsampling layers are not visible in the record; the standard DCGAN pattern
    # that reaches the 28x28x1 shape expected by the discriminator below is assumed:
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    return model
generator = make_generator_model()
Discriminator Model
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
discriminator = make_discriminator_model()
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss
Loss Function
def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)
Optimizers
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 100
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
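The epoch loop that drives train_step is not shown in the record; a minimal version consistent with the EPOCHS and checkpoint objects defined above (per-epoch image generation from seed is omitted) might be:
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        for image_batch in dataset:
            train_step(image_batch)
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)   # periodically save weights
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

train(train_dataset, EPOCHS)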
Output: Trained a Generative Adversarial Network on the MNIST dataset for image generation.