Assignment 1
Design a McCulloch-Pitts neural network that behaves as an AND function,
trained with Adaline learning. Consider the unipolar case. Perform an analysis
by varying the NN parameters.
Code:
import numpy as np

# Truth-table inputs for a 2-input gate and the unipolar AND targets.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
AND_y = np.array([0, 0, 0, 1])
class Adaline:
    """Single neuron with a unipolar hard-limit output, trained sample-by-sample.

    NOTE(review): the error term uses the *thresholded* output rather than the
    raw net input, so this is the perceptron learning rule, not classic Adaline
    (LMS). Kept as-is to match the assignment's reported results.
    """

    def __init__(self, learning_rate=0.1, epochs=50):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = None  # set by train()
        self.bias = None     # set by train()

    def activation(self, net_input):
        """Unipolar step: 1 if net input >= 0, else 0."""
        return 1 if net_input >= 0 else 0

    def train(self, X, y):
        """Fit weights and bias by cycling over the samples for `epochs` passes."""
        self.weights = np.zeros(X.shape[1])
        self.bias = 0
        for _ in range(self.epochs):
            for i in range(len(X)):
                net_input = np.dot(X[i], self.weights) + self.bias
                output = self.activation(net_input)
                error = y[i] - output
                # Delta rule: nudge parameters in the direction that reduces error.
                self.weights += self.learning_rate * error * X[i]
                self.bias += self.learning_rate * error

    def predict(self, X):
        """Return the thresholded (0/1) output for each row of X."""
        return np.array([self.activation(np.dot(x, self.weights) + self.bias) for x in X])
# Train on the AND truth table and report the learned outputs.
model = Adaline()
model.train(X, AND_y)
predictions = model.predict(X)
print(f"Output: {predictions}")
Output:
Output: [0 0 0 1]
Assignment 2: Similarly, develop McCulloch-Pitts neural nets for the OR, NAND
and NOR gates, and draw the neural nets.
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt

# Shared truth-table inputs and the unipolar targets for each basic gate.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
AND_y = np.array([0, 0, 0, 1])
OR_y = np.array([0, 1, 1, 1])
NAND_y = np.array([1, 1, 1, 0])
NOR_y = np.array([1, 0, 0, 0])
class Adaline:
    """Single neuron with a unipolar hard-limit output, trained sample-by-sample.

    NOTE(review): the error term uses the thresholded output, so this is the
    perceptron learning rule rather than classic Adaline (LMS) — kept as-is.
    """

    def __init__(self, learning_rate=0.1, epochs=50):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = None  # set by train()
        self.bias = None     # set by train()

    def activation(self, net_input):
        """Unipolar step: 1 if net input >= 0, else 0."""
        return 1 if net_input >= 0 else 0

    def train(self, X, y):
        """Fit weights and bias by cycling over the samples for `epochs` passes."""
        self.weights = np.zeros(X.shape[1])
        self.bias = 0
        for _ in range(self.epochs):
            for i in range(len(X)):
                net_input = np.dot(X[i], self.weights) + self.bias
                output = self.activation(net_input)
                error = y[i] - output
                # Delta rule update per sample.
                self.weights += self.learning_rate * error * X[i]
                self.bias += self.learning_rate * error

    def predict(self, X):
        """Return the thresholded (0/1) output for each row of X."""
        return np.array([self.activation(np.dot(x, self.weights) + self.bias) for x in X])
def draw_neural_net(gate_name):
    """Draw a minimal 2-input / 1-neuron net, labelling the output node with the gate name."""
    G = nx.DiGraph()  # presumably directed input -> neuron -> output; confirm original intent
    G.add_edges_from([("x1", "Neuron"), ("x2", "Neuron"), ("Neuron", "Output")])
    plt.figure(figsize=(5, 3))
    # Fixed layout: inputs on the left, the neuron in the middle, output on the right.
    pos = {"x1": (0, 1), "x2": (0, -1), "Neuron": (2, 0), "Output": (4, 0)}
    labels = {"x1": "x1", "x2": "x2", "Neuron": "Neuron", "Output": gate_name}
    # NOTE(review): `labels` was defined but the mangled source didn't show it being
    # passed; passing it here so the output node actually shows the gate name.
    nx.draw(G, pos, labels=labels, with_labels=True, node_size=3000,
            node_color="lightblue", edge_color="black", font_size=10)
    plt.show()
# Train one model per gate, report its outputs, and draw the corresponding net.
for gate_name, y in zip(["AND", "OR", "NAND", "NOR"], [AND_y, OR_y, NAND_y, NOR_y]):
    model = Adaline()
    model.train(X, y)
    predictions = model.predict(X)
    print(f"Output for {gate_name}: {predictions}")
    draw_neural_net(gate_name)
Output:
Output for AND: [0 0 0 1]
Output for OR: [0 1 1 1]
Output for NAND: [1 1 1 0]
Output for NOR: [1 0 0 0]
Assignment 3: Perform test for bipolar model as well.
import numpy as np

# Unipolar inputs with bipolar (+1 / -1) AND targets.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
BIPOLAR_AND_y = np.array([-1, -1, -1, 1])
class Adaline:
    """Single neuron with a bipolar hard-limit output, trained sample-by-sample.

    Same structure as the unipolar version; only the activation differs
    (returns -1 instead of 0 below the threshold).
    """

    def __init__(self, learning_rate=0.1, epochs=50):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.weights = None  # set by train()
        self.bias = None     # set by train()

    def activation(self, net_input):
        """Bipolar step: 1 if net input >= 0, else -1."""
        return 1 if net_input >= 0 else -1

    def train(self, X, y):
        """Fit weights and bias by cycling over the samples for `epochs` passes."""
        self.weights = np.zeros(X.shape[1])
        self.bias = 0
        for _ in range(self.epochs):
            for i in range(len(X)):
                net_input = np.dot(X[i], self.weights) + self.bias
                output = self.activation(net_input)
                error = y[i] - output  # error is -2, 0, or +2 in the bipolar case
                self.weights += self.learning_rate * error * X[i]
                self.bias += self.learning_rate * error

    def predict(self, X):
        """Return the thresholded (+1 / -1) output for each row of X."""
        return np.array([self.activation(np.dot(x, self.weights) + self.bias) for x in X])
# Train on the bipolar AND targets and report the learned outputs.
model = Adaline()
model.train(X, BIPOLAR_AND_y)
predictions = model.predict(X)
print(f"Output: {predictions}")
Output:
Output: [-1 -1 -1 1]
Assignment 4: Implement McCulloch-Pitts neural network model for XOR and give all
the formula you used in the implementation. Draw the MLPs used for the implementation of
above functions.
import numpy as np

# XOR truth table: not linearly separable, so a hidden layer is required.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
class MLP:
    """2-2-1 multilayer perceptron trained with per-sample backpropagation.

    Sigmoid activations in both layers; weights start from a random draw so
    the two hidden units can break symmetry.
    """

    def __init__(self, learning_rate=0.1, epochs=10000):
        self.learning_rate = learning_rate
        self.epochs = epochs
        # 2 inputs -> 2 hidden units -> 1 output.
        self.weights_hidden = np.random.rand(2, 2)
        self.bias_hidden = np.random.rand(2)
        self.weights_output = np.random.rand(2)
        self.bias_output = np.random.rand(1)

    def sigmoid(self, x):
        """Logistic activation."""
        return 1 / (1 + np.exp(-x))

    def train(self, X, y):
        """Stochastic (per-sample) backpropagation on squared error."""
        for _ in range(self.epochs):
            for i in range(len(X)):
                # Forward pass.
                hidden_input = np.dot(X[i], self.weights_hidden) + self.bias_hidden
                hidden_output = self.sigmoid(hidden_input)
                final_input = np.dot(hidden_output, self.weights_output) + self.bias_output
                final_output = self.sigmoid(final_input)
                # Backward pass: chain rule through both sigmoid layers.
                error = y[i] - final_output
                d_output = error * final_output * (1 - final_output)
                # Hidden deltas must use the *old* output weights, so compute
                # them before the output-layer update.
                d_hidden = d_output * self.weights_output * hidden_output * (1 - hidden_output)
                self.weights_output += self.learning_rate * d_output * hidden_output
                self.bias_output += self.learning_rate * d_output
                # np.outer gives the full (2, 2) gradient; the original scalar
                # np.dot update moved every hidden weight by the same amount,
                # which is not a valid gradient step.
                self.weights_hidden += self.learning_rate * np.outer(X[i], d_hidden)
                self.bias_hidden += self.learning_rate * d_hidden

    def predict(self, X):
        """Threshold the network output at 0.5 to get a 0/1 label per row."""
        predictions = []
        for i in range(len(X)):
            hidden_input = np.dot(X[i], self.weights_hidden) + self.bias_hidden
            hidden_output = self.sigmoid(hidden_input)
            final_input = np.dot(hidden_output, self.weights_output) + self.bias_output
            final_output = self.sigmoid(final_input)
            predictions.append(1 if final_output >= 0.5 else 0)
        return np.array(predictions)
# Train the per-sample MLP on XOR and print its predictions.
model = MLP()
model.train(X, y)
predictions = model.predict(X)
print(f"Output: {predictions}")
Output:
Output: [0 1 1 0]
Assignment 5: Implement MLP model for XOR by using backpropagation algorithm
import numpy as np

# XOR truth table; targets are a column vector for the vectorized batch updates.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
class MLP_Backprop:
    """2-2-1 MLP for XOR trained with full-batch backpropagation."""

    def __init__(self, learning_rate=0.1, epochs=10000):
        self.learning_rate = learning_rate
        self.epochs = epochs
        # NOTE(review): all-positive uniform init can stall on XOR; a zero-mean
        # init (np.random.randn) typically converges more reliably — confirm
        # against the original source before changing.
        self.weights_hidden = np.random.rand(2, 2)
        self.bias_hidden = np.random.rand(2)
        self.weights_output = np.random.rand(2, 1)
        self.bias_output = np.random.rand(1)

    def sigmoid(self, x):
        """Logistic activation."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is already sigmoid(net), so the derivative is x * (1 - x).
        return x * (1 - x)

    def train(self, X, y):
        """Full-batch gradient descent with backpropagated deltas."""
        for _ in range(self.epochs):
            # Forward pass over the whole batch.
            hidden_input = np.dot(X, self.weights_hidden) + self.bias_hidden
            hidden_output = self.sigmoid(hidden_input)
            final_input = np.dot(hidden_output, self.weights_output) + self.bias_output
            final_output = self.sigmoid(final_input)
            # Backward pass: output delta, then hidden delta via the old
            # output weights (computed before any weight update).
            error = y - final_output
            d_output = error * self.sigmoid_derivative(final_output)
            d_hidden = d_output.dot(self.weights_output.T) * self.sigmoid_derivative(hidden_output)
            self.weights_output += hidden_output.T.dot(d_output) * self.learning_rate
            self.bias_output += np.sum(d_output, axis=0) * self.learning_rate
            self.weights_hidden += X.T.dot(d_hidden) * self.learning_rate
            self.bias_hidden += np.sum(d_hidden, axis=0) * self.learning_rate

    def predict(self, X):
        """Forward pass and threshold at 0.5; returns an (n, 1) int array."""
        hidden_input = np.dot(X, self.weights_hidden) + self.bias_hidden
        hidden_output = self.sigmoid(hidden_input)
        final_input = np.dot(hidden_output, self.weights_output) + self.bias_output
        final_output = self.sigmoid(final_input)
        return (final_output >= 0.5).astype(int)
# Train the batch-backprop MLP on XOR and print the flattened predictions.
model = MLP_Backprop()
model.train(X, y)
predictions = model.predict(X)
print(f"Output: {predictions.flatten()}")
Output:
Output: [0 1 1 0]
(Note: a run that fails to converge may print other patterns such as [1 1 0 0];
re-run or adjust the weight initialization / learning rate in that case.)
Conclusion:
The implementation of McCulloch-Pitts and MLP models for basic logic gates, including XOR,
successfully demonstrates the ability of neural networks to learn nonlinear functions. The
Adaline learning approach effectively models simple gates like AND, OR, NAND, and NOR,
while the XOR function, being non-linearly separable, requires a multi-layer perceptron (MLP)
trained with backpropagation. The results confirm that neural networks can approximate
complex decision boundaries given the correct architecture and learning strategy. Variations in
learning rate and epochs impact convergence, highlighting the importance of parameter tuning.
Overall, this lab reinforces foundational neural network concepts and their practical applications
in AI.