Muhammad Fahad (FA21-BCS-124)
Mujtaba Zulfiqar (FA21-BCS-019)
Machine Learning
Assignment 1:
Multi-Layer Perceptron
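
The listing below implements a small multi-layer perceptron from scratch in NumPy: an input layer, one sigmoid hidden layer, and a single sigmoid output unit. The network is trained by full-batch gradient descent on the squared error, and the example at the end fits the logical OR function.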
import numpy as np
class MLP:
    def __init__(self, input_size, hidden_size, learning_rate=0.01,
                 num_epochs=100):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        # Initialize weights and biases for the hidden layer
        self.weights_input_hidden = np.random.randn(input_size, hidden_size)
        self.bias_hidden = np.zeros(hidden_size)
        # Initialize weights and biases for the output layer
        self.weights_hidden_output = np.random.randn(hidden_size)
        self.bias_output = np.zeros(1)  # Single bias for the single output
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is assumed to already be a sigmoid output, so s'(z) = s(z)(1 - s(z))
        return x * (1 - x)
    def forward_propagation(self, inputs):
        # Forward propagation through the hidden layer
        self.hidden_activation = self.sigmoid(
            np.dot(inputs, self.weights_input_hidden) + self.bias_hidden)
        # Forward propagation through the output layer
        self.output_activation = self.sigmoid(
            np.dot(self.hidden_activation, self.weights_hidden_output)
            + self.bias_output)
    def backward_propagation(self, inputs, target):
        # Error signal at the output layer: derivative of the squared error
        # times the sigmoid derivative of the output activation
        output_error = self.output_activation - target
        output_delta = output_error * self.sigmoid_derivative(
            self.output_activation)
        # Propagate the error back to the hidden layer; weights_hidden_output
        # is a vector, so each sample's hidden error is its output delta
        # times the corresponding hidden-to-output weight
        hidden_error = np.outer(output_delta, self.weights_hidden_output)
        hidden_delta = hidden_error * self.sigmoid_derivative(
            self.hidden_activation)
        # Update weights and biases by gradient descent
        self.weights_hidden_output -= self.learning_rate * np.dot(
            self.hidden_activation.T, output_delta)
        self.bias_output -= self.learning_rate * np.sum(output_delta)
        self.weights_input_hidden -= self.learning_rate * np.dot(
            inputs.T, hidden_delta)
        self.bias_hidden -= self.learning_rate * np.sum(hidden_delta, axis=0)
    def train(self, inputs, target):
        for epoch in range(self.num_epochs):
            # Forward propagation
            self.forward_propagation(inputs)
            # Backward propagation
            self.backward_propagation(inputs, target)

    def predict(self, inputs):
        self.forward_propagation(inputs)
        return np.round(self.output_activation).astype(int)
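
As a sanity check on the backpropagation above, the analytic gradients can be compared against central finite differences of the loss the update rule implies, L = 0.5 * sum((output - target)^2). This is a minimal sketch, not part of the assignment: the helper names squared_error_loss and check_output_weight_gradients, the epsilon value, and the use of copy.deepcopy to snapshot the parameters are all illustrative choices.

import copy

def squared_error_loss(mlp, inputs, target):
    mlp.forward_propagation(inputs)
    return 0.5 * np.sum((mlp.output_activation - target) ** 2)

def check_output_weight_gradients(mlp, inputs, target, epsilon=1e-5):
    # Snapshot the parameters, then take one descent step on the original
    # (note: this mutates the passed-in mlp by one update)
    snapshot = copy.deepcopy(mlp)
    mlp.forward_propagation(inputs)
    mlp.backward_propagation(inputs, target)
    # Recover the analytic gradient from the update: w_new = w_old - lr * grad
    analytic = (snapshot.weights_hidden_output
                - mlp.weights_hidden_output) / mlp.learning_rate
    # Central finite differences, evaluated on the untouched snapshot
    numeric = np.zeros_like(analytic)
    for i in range(numeric.size):
        original = snapshot.weights_hidden_output[i]
        snapshot.weights_hidden_output[i] = original + epsilon
        loss_plus = squared_error_loss(snapshot, inputs, target)
        snapshot.weights_hidden_output[i] = original - epsilon
        loss_minus = squared_error_loss(snapshot, inputs, target)
        snapshot.weights_hidden_output[i] = original
        numeric[i] = (loss_plus - loss_minus) / (2 * epsilon)
    # Should be close to zero if the backward pass is correct
    return np.max(np.abs(analytic - numeric))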
# Example usage: learn the logical OR function
training_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
target = np.array([0, 1, 1, 1])  # Single target for each input
mlp = MLP(input_size=2, hidden_size=4, learning_rate=0.5, num_epochs=10000)
mlp.train(training_inputs, target)

# Test the trained MLP
inputs = np.array([[1, 1]])
print(mlp.predict(inputs))  # Single rounded output value as an integer
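
After a sufficiently long training run, the network should reproduce the whole OR truth table. A quick check over all four input patterns, assuming the mlp trained above:

for x in training_inputs:
    # reshape to a (1, 2) batch of one sample; expect 0, 1, 1, 1
    print(x, "->", mlp.predict(x.reshape(1, -1)))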