Date:
Aim:
Algorithm:
Program:
import numpy as np

class Perceptron:
    def __init__(self, learning_rate=0.01, n_iter=1000):
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        """
        Fit the model to the data.
        X: ndarray, shape (n_samples, n_features) - Input features.
        y: ndarray, shape (n_samples,) - Target labels (-1 or 1).
        """
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        # Ensure y is either -1 or 1
        y = np.where(y <= 0, -1, 1)
        for _ in range(self.n_iter):
            for idx, x_i in enumerate(X):
                linear_output = np.dot(x_i, self.weights) + self.bias
                y_predicted = np.sign(linear_output)
                # Update weights and bias if there is a misclassification
                if y_predicted != y[idx]:
                    self.weights += self.learning_rate * y[idx] * x_i
                    self.bias += self.learning_rate * y[idx]

    def predict(self, X):
        """
        Predict labels for given input data.
        X: ndarray, shape (n_samples, n_features) - Input features.
        Returns: ndarray, shape (n_samples,) - Predicted labels (-1 or 1).
        """
        linear_output = np.dot(X, self.weights) + self.bias
        return np.sign(linear_output)
# Example usage:
if __name__ == "__main__":
    # Example dataset
    X = np.array([
        [1, 2],
        [2, 3],
        [3, 4],
        [1, 0],
        [0, 1],
        [3, 1]
    ])
    y = np.array([1, 1, 1, -1, -1, -1])  # Binary labels
    # Create and train the perceptron
    perceptron = Perceptron(learning_rate=0.1, n_iter=10)
    perceptron.fit(X, y)
    # Predict new data points
    predictions = perceptron.predict(X)
    print("Predicted labels:", predictions)
    print("Actual labels: ", y)
OUTPUT:
Predicted labels: [ 1. 1. 1. -1. -1. -1.]
Actual labels: [ 1 1 1 -1 -1 -1]
Result:
EX NO:2 Implementing a Feed-Forward Neural Network for Regression
Date:
Aim:
Algorithm:
Program
import numpy as np

class FeedForwardNN:
    def __init__(self, n_input, n_hidden, n_output, learning_rate=0.01):
        self.learning_rate = learning_rate
        # Initialize weights and biases
        self.weights_input_hidden = np.random.randn(n_input, n_hidden) * 0.1
        self.bias_hidden = np.zeros(n_hidden)
        self.weights_hidden_output = np.random.randn(n_hidden, n_output) * 0.1
        self.bias_output = np.zeros(n_output)

    def sigmoid(self, x):
        """Sigmoid activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid function (expects the sigmoid output)."""
        return x * (1 - x)

    def forward(self, X):
        """Forward pass."""
        self.hidden_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = self.sigmoid(self.hidden_input)
        self.final_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.final_output = self.final_input  # Linear activation for regression
        return self.final_output

    def backward(self, X, y, output):
        """Backward pass."""
        # Calculate errors
        error = y - output
        output_gradient = -2 * error
        # Backpropagation
        hidden_error = np.dot(output_gradient, self.weights_hidden_output.T)
        hidden_gradient = hidden_error * self.sigmoid_derivative(self.hidden_output)
        # Update weights and biases
        self.weights_hidden_output -= self.learning_rate * np.dot(self.hidden_output.T, output_gradient)
        self.bias_output -= self.learning_rate * np.sum(output_gradient, axis=0)
        self.weights_input_hidden -= self.learning_rate * np.dot(X.T, hidden_gradient)
        self.bias_hidden -= self.learning_rate * np.sum(hidden_gradient, axis=0)

    def fit(self, X, y, epochs):
        """Train the neural network."""
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output)
            if epoch % 100 == 0:
                loss = np.mean((y - output) ** 2)
                print(f"Epoch {epoch}, Loss: {loss}")

    def predict(self, X):
        """Make predictions."""
        return self.forward(X)
# Example usage
if __name__ == "__main__":
    # Example dataset
    X = np.array([[0], [1], [2], [3], [4]], dtype=float)
    y = np.array([[0], [2], [4], [6], [8]], dtype=float)  # Linear relationship: y = 2x
    # Scale data
    X /= np.max(X)
    y /= np.max(y)
    # Create and train the model
    nn = FeedForwardNN(n_input=1, n_hidden=10, n_output=1, learning_rate=0.1)
    nn.fit(X, y, epochs=1000)
    # Test predictions
    predictions = nn.predict(X)
    print("Predictions:", predictions)
    print("Actual values:", y)
OUTPUT:
Epoch 0, Loss: 0.423209316523922
Epoch 100, Loss: 0.012751554694317487
Epoch 200, Loss: 0.004091264310811452
Epoch 300, Loss: 0.003407964147190816
Epoch 400, Loss: 0.003261432113502563
Epoch 500, Loss: 0.0031387241587497255
Epoch 600, Loss: 0.0030218435029647278
Epoch 700, Loss: 0.002910123186987161
Epoch 800, Loss: 0.0028033722960642423
Epoch 900, Loss: 0.0027014068416472323
Predictions: [[-0.04943591]
[ 0.29121125]
[ 0.55959607]
[ 0.76843809]
[ 0.92938637]]
Actual values: [[0. ]
[0.25]
[0.5 ]
[0.75]
[1. ]]
Result:
Ex No: 3 Implementing a Deep Feed-Forward Neural Network for Image Classification
Date:
Aim:
Algorithm:
Program:
# Load required packages
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras import Input
from keras.layers import Dense
import pandas as pd
import numpy as np
import sklearn
from sklearn.metrics import classification_report
import matplotlib
import matplotlib.pyplot as plt
# Load digits data
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
# Print shapes
print("Shape of X_train: ", X_train.shape) print("Shape of y_train: ", y_train.shape) print("Shape of
X_test: ", X_test.shape) print("Shape of y_test: ", y_test.shape)
# Display images of the first 10 digits in the training set and their true labels
fig, axs = plt.subplots(2, 5, sharey=False, tight_layout=True, figsize=(12, 6), facecolor='white')
n = 0
for i in range(0, 2):
    for j in range(0, 5):
        axs[i, j].matshow(X_train[n])
        axs[i, j].set(title=y_train[n])
        n = n + 1
plt.show()
# Reshape and normalize (divide by 255) input data
X_train = X_train.reshape(60000, 784).astype("float32") / 255
X_test = X_test.reshape(10000, 784).astype("float32") / 255
# Print shapes
print("New shape of X_train: ", X_train.shape)
print("New shape of X_test: ", X_test.shape)
# Design the Deep FF Neural Network architecture
model = Sequential(name="DFF-Model")  # Model
model.add(Input(shape=(784,), name='Input-Layer')) # Input Layer - need to specify the shape of inputs
model.add(Dense(128, activation='relu', name='Hidden-Layer-1', kernel_initializer='HeNormal'))
model.add(Dense(64, activation='relu', name='Hidden-Layer-2', kernel_initializer='HeNormal'))
model.add(Dense(32, activation='relu', name='Hidden-Layer-3', kernel_initializer='HeNormal'))
model.add(Dense(10, activation='softmax', name='Output-Layer'))
#Compile keras model
model.compile(optimizer='adam', loss='SparseCategoricalCrossentropy', metrics=['accuracy'],
              loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None)
#Fit keras model on the dataset
model.fit(X_train, y_train, batch_size=10, epochs=5, verbose='auto', callbacks=None,
          validation_split=0.2, shuffle=True, class_weight=None, sample_weight=None,
          initial_epoch=0,  # epoch at which to start training (useful for resuming a previous run)
          steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=5,
          max_queue_size=10, workers=1, use_multiprocessing=False)
# Apply the trained model to make predictions
# Predict class labels on the training data
pred_labels_tr = np.array(tf.math.argmax(model.predict(X_train), axis=1))
# Predict class labels on the test data
pred_labels_te = np.array(tf.math.argmax(model.predict(X_test), axis=1))
# Model Performance Summary
print("")
print(' Model Summary ')
model.summary()
print("")
# Printing the parameters: the Deep Feed-Forward Neural Network contains more than 100K of them,
# so the weight/bias dump below is left commented out.
# print(' Weights and Biases ')
# for layer in model.layers:
#     print("Layer: ", layer.name)                              # layer name
#     print(" --Kernels (Weights): ", layer.get_weights()[0])   # kernels (weights)
#     print(" --Biases: ", layer.get_weights()[1])              # biases
print("")
print('---------- Evaluation on Training Data ----------')
print(classification_report(y_train, pred_labels_tr))
print("")
print('---------- Evaluation on Test Data ----------')
print(classification_report(y_test, pred_labels_te))
print("")
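Optionally (not part of the recorded output), the overall test loss and accuracy can also be checked with model.evaluate:
# Optional: overall loss and accuracy on the test set
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print("Test loss:", test_loss, " Test accuracy:", test_acc)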
OUTPUT:
Result:
Ex No: 4 Implementing Regularization Techniques in Deep Learning
Date:
Aim:
Algorithm:
Program:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
# Load MNIST dataset
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
# Normalize the data
X_train, X_test = X_train / 255.0, X_test / 255.0
# Flatten the images
X_train = X_train.reshape(-1, 28*28)
X_test = X_test.reshape(-1, 28*28)
# Convert labels to categorical (one-hot encoding)
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model = keras.Sequential([
    layers.Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.01)),  # L2 Regularization
    layers.Dropout(0.5),          # Dropout Regularization
    layers.BatchNormalization(),  # Batch Normalization
    layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l1(0.01)),  # L1 Regularization
    layers.Dropout(0.3),
    layers.BatchNormalization(),
    layers.Dense(10, activation='softmax')  # Output layer
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Early stopping callback
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
restore_best_weights=True)
# Train the model
history = model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test),
callbacks=[early_stopping])
#Visualizing Training Progress
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
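Optionally (not part of the recorded output), the regularized model can be scored once on the held-out test set:
# Optional: final evaluation on the test set
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Test loss: {test_loss:.4f}, Test accuracy: {test_acc:.4f}")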
Output:
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Epoch 1/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 35s 15ms/step - accuracy: 0.7750 - loss:
11.2035 - val_accuracy: 0.8861 - val_loss: 1.7236
Epoch 2/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 24s 13ms/step - accuracy: 0.8159 - loss:
1.8179 - val_accuracy: 0.9083 - val_loss: 1.2380
Epoch 3/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 41s 13ms/step - accuracy: 0.8105 - loss:
1.5340 - val_accuracy: 0.9050 - val_loss: 1.1971
Epoch 4/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 25s 13ms/step - accuracy: 0.8104 - loss:
1.4364 - val_accuracy: 0.9130 - val_loss: 1.1198
Epoch 5/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 28s 15ms/step - accuracy: 0.8103 - loss:
1.3715 - val_accuracy: 0.9178 - val_loss: 1.0066
Epoch 6/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 37s 13ms/step - accuracy: 0.8053 - loss:
1.3316 - val_accuracy: 0.9067 - val_loss: 1.0245
Epoch 7/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 12ms/step - accuracy: 0.8002 - loss:
1.3170 - val_accuracy: 0.9199 - val_loss: 0.9647
Epoch 8/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 43s 13ms/step - accuracy: 0.8143 - loss:
1.2657 - val_accuracy: 0.9093 - val_loss: 0.9978
Epoch 9/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 23s 12ms/step - accuracy: 0.8067 - loss:
1.2820 - val_accuracy: 0.9212 - val_loss: 0.9440
Epoch 10/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 41s 12ms/step - accuracy: 0.8073 - loss:
1.2439 - val_accuracy: 0.9209 - val_loss: 0.9482
Epoch 11/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 42s 13ms/step - accuracy: 0.8088 - loss:
1.2563 - val_accuracy: 0.9186 - val_loss: 0.9308
Epoch 12/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 13ms/step - accuracy: 0.8053 - loss:
1.2493 - val_accuracy: 0.9123 - val_loss: 0.9325
Epoch 13/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 43s 14ms/step - accuracy: 0.8023 - loss:
1.2381 - val_accuracy: 0.9125 - val_loss: 0.9259
Epoch 14/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 26s 14ms/step - accuracy: 0.8016 - loss:
1.2321 - val_accuracy: 0.9159 - val_loss: 0.9011
Epoch 15/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 27s 14ms/step - accuracy: 0.8047 - loss:
1.2195 - val_accuracy: 0.8884 - val_loss: 0.9978
Epoch 16/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 14ms/step - accuracy: 0.7989 - loss:
1.2289 - val_accuracy: 0.9119 - val_loss: 0.8957
Epoch 17/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 26s 14ms/step - accuracy: 0.8007 - loss:
1.2022 - val_accuracy: 0.8944 - val_loss: 0.9354
Epoch 18/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 13ms/step - accuracy: 0.7992 - loss:
1.1980 - val_accuracy: 0.9050 - val_loss: 0.8906
Epoch 19/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 27s 14ms/step - accuracy: 0.7967 - loss:
1.2069 - val_accuracy: 0.9208 - val_loss: 0.8780
Epoch 20/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 14ms/step - accuracy: 0.7924 - loss:
1.2119 - val_accuracy: 0.9051 - val_loss: 0.9037
Epoch 21/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 39s 13ms/step - accuracy: 0.7811 - loss:
1.2489 - val_accuracy: 0.9032 - val_loss: 0.8735
Epoch 22/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 41s 13ms/step - accuracy: 0.7907 - loss:
1.2039 - val_accuracy: 0.9133 - val_loss: 0.8462
Epoch 23/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 41s 13ms/step - accuracy: 0.7933 - loss:
1.1935 - val_accuracy: 0.9126 - val_loss: 0.8707
Epoch 24/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 25s 13ms/step - accuracy: 0.7935 - loss:
1.1985 - val_accuracy: 0.8980 - val_loss: 0.8832
Epoch 25/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 39s 12ms/step - accuracy: 0.7913 - loss:
1.2060 - val_accuracy: 0.9054 - val_loss: 0.8631
Epoch 26/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 43s 14ms/step - accuracy: 0.7970 - loss:
1.1961 - val_accuracy: 0.9137 - val_loss: 0.8399
Epoch 27/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 24s 13ms/step - accuracy: 0.7900 - loss:
1.1910 - val_accuracy: 0.9144 - val_loss: 0.8167
Epoch 28/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 27s 14ms/step - accuracy: 0.7968 - loss:
1.1791 - val_accuracy: 0.9116 - val_loss: 0.8210
Epoch 29/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 14ms/step - accuracy: 0.7938 - loss:
1.1829 - val_accuracy: 0.9063 - val_loss: 0.8829
Epoch 30/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 27s 14ms/step - accuracy: 0.8003 - loss:
1.1655 - val_accuracy: 0.9173 - val_loss: 0.8390
Epoch 31/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 40s 14ms/step - accuracy: 0.7952 - loss:
1.1876 - val_accuracy: 0.9029 - val_loss: 0.8565
Epoch 32/50
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 25s 13ms/step - accuracy: 0.8060 - loss:
1.1589 - val_accuracy: 0.9182 - val_loss: 0.8615
Result:
Ex: No: 5 Implementing a Simple CNN for Image Classification
Date:
Aim:
Algorithm:
Program:
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
import os
from tensorflow.keras.preprocessing import image
import numpy as np
train_dir = "D:/SJIT/DL/LAB/at/train"
test_dir = "D:/SJIT/DL/LAB/at/test"
img_height, img_width = 224, 224
num_classes = len(os.listdir(train_dir))
datagen = ImageDataGenerator( rescale=1./255, validation_split=0.2)
train_generator = datagen.flow_from_directory(train_dir,
                                              target_size=(224, 224), batch_size=20,
                                              class_mode='categorical', subset='training', shuffle=True)
# Output: Found 236 images belonging to 2 classes.
validation_generator = datagen.flow_from_directory(train_dir,
                                                   target_size=(224, 224), batch_size=20,
                                                   class_mode='categorical', subset='validation',
                                                   shuffle=False)
# Output: Found 58 images belonging to 2 classes.
model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D((2, 2)),
Conv2D(64, (3, 3), activation='relu'),
Flatten(),
Dense(64, activation='relu'),
Dense(num_classes, activation='softmax')])
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_generator, epochs=10, validation_data=validation_generator)
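The test_dir path defined above is never used in the listing; a possible way to score the trained model on it, assuming the same two-class folder layout as the training directory, is:
# Optional: evaluate on the separate test directory (assumed to mirror the training folder layout)
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(test_dir, target_size=(224, 224),
                                                  batch_size=20, class_mode='categorical', shuffle=False)
test_loss, test_acc = model.evaluate(test_generator)
print(f"Test accuracy: {test_acc:.4f}")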
img_path = "D:\\SJIT\\DL\\LAB\\lp.jpg" # Replace with the path to your image
img = image.load_img(img_path, target_size=(224, 224))  # Adjust target_size if needed
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = img / 255.0
predictions = model.predict(img)
predicted_class = np.argmax(predictions)
class_labels = {0: 'apples', 1: 'tomatoes'}
predicted_label = class_labels[predicted_class]
print(f"Predicted class: {predicted_class} (Label: {predicted_label})")
Output:
Result:
Ex: No: 6 Implementing Transfer Learning with a Pre-trained CNN
Date:
Aim:
Algorithm:
Program:
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
# Set your custom dataset path
train_dir = "D:/SJIT/DL/LAB/at/train"
test_dir = "D:/SJIT/DL/LAB/at/test"
# Define hyperparameters
img_width, img_height = 224, 224
batch_size = 32
num_classes = 2 # The number of classes in your dataset
epochs = 10
# Data augmentation and preprocessing
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest'
)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(
    test_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
# Load the pre-trained VGG16 model
base_model = VGG16(weights='imagenet', include_top=False,
input_shape=(img_width, img_height, 3))
# Create a custom classification model on top of VGG16
model = Sequential()
model.add(base_model) # Add the pre-trained VGG16 model
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# Freeze the pre-trained layers
for layer in base_model.layers:
    layer.trainable = False
# Compile the model
model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(train_generator, epochs=epochs, validation_data=validation_generator)
# Optionally, you can unfreeze and fine-tune some layers
for layer in base_model.layers[-4:]:
    layer.trainable = True
model.compile(optimizer=Adam(learning_rate=0.00001), loss='categorical_crossentropy',
              metrics=['accuracy'])
# Continue training for additional epochs
model.fit(train_generator, epochs=epochs, validation_data=validation_generator)
img_path = "D:\\SJIT\\DL\\LAB\\lp.jpg" # Replace with the path to your image
img = image.load_img(img_path, target_size=(224, 224))  # Adjust target_size if needed
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = img / 255.0
predictions = model.predict(img)
predicted_class = np.argmax(predictions)
class_labels = {0: 'apples', 1: 'tomatoes'}
predicted_label = class_labels[predicted_class]
print(f"Predicted class: {predicted_class} (Label: {predicted_label})")
OUTPUT:
Predicted Class: apple
Result:
Ex: No: 7 Implementing an Autoencoder for Image Reconstruction
Date:
Aim:
Algorithm:
Program:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, RepeatVector, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
# Load MNIST dataset
(x_train, _), (x_test, _) = mnist.load_data()
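The listing jumps from loading MNIST straight to plotting reconstructed digits, so the preprocessing, model-building and training steps are missing. A minimal sketch of those steps is given below, assuming a simple fully connected autoencoder (the imports of LSTM/RepeatVector/TimeDistributed above hint at a sequence-style variant, but the exact architecture is not recorded):
from tensorflow.keras.layers import Dense

# Normalize and flatten the images
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
x_train = x_train.reshape((len(x_train), 784))
x_test = x_test.reshape((len(x_test), 784))

# Simple dense autoencoder: 784 -> 32 -> 784 (assumed architecture)
inputs = Input(shape=(784,))
encoded = Dense(32, activation='relu')(inputs)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(inputs, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Train the network to reconstruct its own inputs
autoencoder.fit(x_train, x_train, epochs=10, batch_size=256,
                validation_data=(x_test, x_test))

# Reconstruct the test images and plot originals (top row) vs reconstructions (bottom row)
decoded_images = autoencoder.predict(x_test)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # Original images
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)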
    # Reconstructed images
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_images[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
OUTPUT:
Result:
Ex: No: 8 Implementing a Generative Adversarial Network for Image Generation
Date:
Aim:
Algorithm:
Program:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Reshape, Flatten
from tensorflow.keras.layers import BatchNormalization, LeakyReLU
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import mnist
# Load MNIST data
(x_train, _), (_, _) = mnist.load_data()
# Normalize and reshape data
x_train = x_train / 127.5 - 1.0
x_train = np.expand_dims(x_train, axis=3)
# Define the generator model
generator = Sequential()
generator.add(Dense(128 * 7 * 7, input_dim=100))
generator.add(LeakyReLU(0.2))
generator.add(Reshape((7, 7, 128)))
generator.add(BatchNormalization())
generator.add(Flatten())
generator.add(Dense(28 * 28 * 1, activation='tanh'))
generator.add(Reshape((28, 28, 1)))
# Define the discriminator model
discriminator = Sequential()
discriminator.add(Flatten(input_shape=(28, 28, 1)))
discriminator.add(Dense(128))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dense(1, activation='sigmoid'))
# Compile the discriminator
discriminator.compile(loss='binary_crossentropy',
optimizer=Adam(learning_rate=0.0002, beta_1=0.5), metrics=['accuracy'])
# Freeze the discriminator during GAN training
discriminator.trainable = False
# Combine generator and discriminator into a GAN model
gan = Sequential()
gan.add(generator)
gan.add(discriminator)
# Compile the GAN
gan.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0002,
beta_1=0.5))
# Function to train the GAN
def train_gan(epochs=1, batch_size=128):
    batch_count = x_train.shape[0] // batch_size
    for e in range(epochs):
        for _ in range(batch_count):
            # Train the discriminator on a mixed batch of real and generated images
            noise = np.random.normal(0, 1, size=[batch_size, 100])
            generated_images = generator.predict(noise)
            image_batch = x_train[np.random.randint(0, x_train.shape[0], size=batch_size)]
            X = np.concatenate([image_batch, generated_images])
            y_dis = np.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9  # Label smoothing for the real images
            discriminator.trainable = True
            d_loss = discriminator.train_on_batch(X, y_dis)
            # Train the generator through the combined GAN model
            noise = np.random.normal(0, 1, size=[batch_size, 100])
            y_gen = np.ones(batch_size)
            discriminator.trainable = False
            g_loss = gan.train_on_batch(noise, y_gen)
        print(f"Epoch {e+1}/{epochs}, Discriminator Loss: {d_loss[0]}, Generator Loss: {g_loss}")

# Train the GAN
train_gan(epochs=200, batch_size=128)
# Generate and plot some images
def plot_generated_images(epoch, examples=10, dim=(1, 10), figsize=(10, 1)):
    noise = np.random.normal(0, 1, size=[examples, 100])
    generated_images = generator.predict(noise)
    generated_images = generated_images.reshape(examples, 28, 28)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i+1)
        plt.imshow(generated_images[i], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig(f'gan_generated_image_epoch_{epoch}.png')
# Plot generated images for a few epochs
for epoch in range(1, 10):
    plot_generated_images(epoch)
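Optionally, the trained generator can be saved so that new digits can be sampled later without retraining (the file name is illustrative):
# Optional: persist the trained generator (illustrative file name)
generator.save('gan_generator.h5')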
OUTPUT:
Result:
Ex: No: 9 Implementing a Convolutional Neural Network for Sentiment Analysis
Date:
Aim:
Algorithm:
Program:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
import matplotlib.pyplot as plt
Ex: No: 10 Implementing a Recurrent Neural Network (LSTM) for Text Generation
Date:
Aim:
Algorithm:
Program:
import tensorflow as tf
import numpy as np
def split_input_target(chunk):
    input_text = chunk[:-1]
    target_text = chunk[1:]
    return input_text, target_text
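The listing uses sequences, vocab_size, embedding_dim, rnn_units, BATCH_SIZE and loss without defining them. A minimal sketch of the usual character-level preparation is shown below; the corpus URL, vocabulary handling and hyperparameter values are assumptions following the common Shakespeare example:
# Assumed preprocessing: build character sequences from a text corpus
path = tf.keras.utils.get_file(
    'shakespeare.txt',
    'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
text = open(path, 'rb').read().decode(encoding='utf-8')

vocab = sorted(set(text))
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])

seq_length = 100
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

# Assumed hyperparameters and loss function
vocab_size = len(vocab)
embedding_dim = 256
rnn_units = 1024
BATCH_SIZE = 64

def loss(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)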
dataset = sequences.map(split_input_target)
# Shuffle and batch so each training batch matches the model's fixed batch size
dataset = dataset.shuffle(10000).batch(BATCH_SIZE, drop_remainder=True)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[BATCH_SIZE, None]),
tf.keras.layers.LSTM(rnn_units,
return_sequences=True,
stateful=True,
recurrent_initializer='glorot_uniform'),
tf.keras.layers.Dense(vocab_size)
])
model.compile(optimizer='adam', loss=loss)
# Train the model for 1 epoch (for demonstration; use more epochs for better results)
EPOCHS = 1
history = model.fit(dataset, epochs=EPOCHS)
# For text generation, rebuild the model with batch size 1 and load the trained weights.
model_for_generation = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[1, None]),
tf.keras.layers.LSTM(rnn_units,
return_sequences=True,
stateful=True,
recurrent_initializer='glorot_uniform'),
tf.keras.layers.Dense(vocab_size)
])
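The generation seed is not defined in the listing; an assumed minimal setup (the start string and generation length are illustrative) is:
# Assumed generation setup
start_string = "ROMEO: "
input_eval = tf.expand_dims([char2idx[s] for s in start_string], 0)
num_generate = 400
text_generated = []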
model_for_generation.set_weights(model.get_weights())
model_for_generation.reset_states()
for i in range(num_generate):
    predictions = model_for_generation(input_eval)
    predictions = tf.squeeze(predictions, 0)
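    # Assumed completion of the loop: sample the next character id from the logits,
    # feed it back as the next input, and collect the generated characters
    predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
    input_eval = tf.expand_dims([predicted_id], 0)
    text_generated.append(idx2char[predicted_id])
print(start_string + ''.join(text_generated))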
OUTPUT:
Generated Text:
ROMEO: And thus the sun of our dark night doth rise, and all the trembling earth in silence weeps.
Why, when the stars did twinkle high,
my heart did yield to sudden rapture, and the night sang of our endless sorrow.
O, tell me, what light through yonder window breaks?
Result: