In [4]: import numpy as np
import matplotlib.pyplot as plt

class Madaline:
    def __init__(self):
        self.w1 = np.random.rand(2, 2)  # weights for layer 1 (2 inputs -> 2 hidden units)
        self.b1 = np.random.rand(2)     # biases for layer 1
        self.w2 = np.random.rand(2, 1)  # weights for layer 2 (output layer has 1 neuron)
        self.b2 = np.random.rand(1)     # bias for layer 2

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
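
    # Backpropagation in train() relies on the sigmoid derivative
    # identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)); this is
    # why the deltas below multiply activations by a * (1 - a)
    # instead of calling a separate derivative function.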

    def forward(self, x):
        # Forward pass through the network
        z1 = np.dot(x, self.w1) + self.b1
        a1 = self.sigmoid(z1)
        z2 = np.dot(a1, self.w2) + self.b2
        a2 = self.sigmoid(z2)
        return a2
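
    # Note: train() repeats this forward computation inline so that the
    # intermediate activations a1 and a2 remain available for the
    # backpropagation updates.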

    def train(self, X, Y, epochs=1000, learning_rate=0.1):
        errors = []
        for epoch in range(epochs):
            total_error = 0
            for i in range(len(X)):
                x = X[i]
                y_true = Y[i]
                # Forward pass
                z1 = np.dot(x, self.w1) + self.b1
                a1 = self.sigmoid(z1)
                z2 = np.dot(a1, self.w2) + self.b2
                a2 = self.sigmoid(z2)
                # Calculate error
                error = y_true - a2
                total_error += np.abs(error)
                # Backpropagation
                delta_a2 = error * (a2 * (1 - a2))  # Output layer delta
                delta_w2 = learning_rate * np.dot(a1.reshape(-1, 1), delta_a2.reshape(1, -1))  # dW2
                delta_b2 = learning_rate * delta_a2  # dB2
                delta_a1 = np.dot(self.w2, delta_a2.T) * (a1 * (1 - a1))  # Hidden layer delta
                delta_w1 = learning_rate * np.dot(x.reshape(-1, 1), delta_a1.reshape(1, -1))  # dW1
                delta_b1 = learning_rate * delta_a1  # dB1
                # Update weights and biases
                self.w2 += delta_w2
                self.b2 += delta_b2.squeeze()
                self.w1 += delta_w1
                self.b1 += delta_b1
            errors.append(total_error / len(X))
            if epoch % 100 == 0:
                print(f"Epoch {epoch}: Error = {errors[-1]}")
        return errors
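
# Note: despite the name, this network is a standard 2-2-1 sigmoid MLP
# trained with backpropagation; a classic Madaline is built from
# threshold (signum) ADALINE units trained with the MR-II rule rather
# than gradient descent.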
# XOR inputs and outputs
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])
# Create Madaline network
model = Madaline()
# Train the network
errors = model.train(X, Y, epochs=1000)
Epoch 0: Error = [0.50015251]
Epoch 100: Error = [0.5032474]
Epoch 200: Error = [0.50315684]
Epoch 300: Error = [0.50304412]
Epoch 400: Error = [0.50291462]
Epoch 500: Error = [0.50276329]
Epoch 600: Error = [0.50258391]
Epoch 700: Error = [0.50236871]
Epoch 800: Error = [0.50210799]
Epoch 900: Error = [0.50178959]
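
The printed error stays near 0.5 for all 1000 epochs, meaning the network is outputting roughly 0.5 for every input and has not learned XOR; with the all-positive np.random.rand initialization and a learning rate of 0.1, this architecture frequently sits on exactly this plateau. As a quick diagnostic, a follow-up cell along these lines (a sketch assuming model, errors, X, and Y from the cell above are still in scope) plots the error curve with the already-imported matplotlib and prints the trained network's predictions:

# Plot the per-epoch mean absolute error returned by train()
plt.plot(np.ravel(errors))
plt.xlabel("Epoch")
plt.ylabel("Mean absolute error")
plt.title("Training error on XOR")
plt.show()

# Print the network's output for each XOR input
for x, y_true in zip(X, Y):
    print(f"input={x}, target={y_true[0]}, prediction={model.forward(x)[0]:.3f}")

Initializing the weights symmetrically around zero (for example, 2 * np.random.rand(2, 2) - 1) and training for more epochs usually lets a 2-2-1 sigmoid network escape this plateau and fit XOR.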