Solving the XOR Problem Using a DNN (AI & DS)
Algorithm:
1. Prepare training data: create input-output pairs for the XOR operation.
2. Design the DNN model: create a network with input, hidden, and output layers.
3. Forward propagation: pass input data through the network to get predictions.
4. Loss function: measure the difference between predicted and actual outputs.
5. Backward propagation: compute the gradients of the loss with respect to the weights.
6. Update weights: adjust the weights using gradient descent; repeat steps 3-6 until the loss converges.
7. Predict: the trained DNN can now predict XOR results accurately.
Program:
import numpy as np
import matplotlib.pyplot as plt
# XOR truth table: each column of x is one 2-bit input, y holds its label.
x = np.array([[0, 0, 1, 1],
              [0, 1, 0, 1]])
y = np.array([[0, 1, 1, 0]])
# Network dimensions: 2 inputs, 2 hidden units, 1 output.
n_x = 2
n_y = 1
n_h = 2
m = x.shape[1]  # number of training examples (4)
# Learning rate for gradient descent.
lr = 0.1
np.random.seed(2)  # reproducible weight initialization
# Weight matrices for the hidden and output layers.  These were missing
# in the original program: w1/w2 were first referenced inside the
# training loop and would have raised NameError.
w1 = np.random.rand(n_h, n_x)
w2 = np.random.rand(n_y, n_h)
losses = []  # per-epoch training loss, collected for plotting
def sigmoid(z):
z= 1/(1+np.exp(-z))
return z
# Forward propagation
def forward_prop(w1, w2, x):
    """One forward pass: input -> hidden sigmoid layer -> sigmoid output.

    Returns (z1, a1, z2, a2): the pre-activations and activations of the
    hidden and output layers, all needed later by back-propagation.
    """
    hidden_pre = np.dot(w1, x)        # z1
    hidden_act = sigmoid(hidden_pre)  # a1
    output_pre = np.dot(w2, hidden_act)  # z2
    output_act = sigmoid(output_pre)     # a2
    return hidden_pre, hidden_act, output_pre, output_act
# Backward propagation
def back_prop(m, w1, w2, z1, a1, z2, a2, y):
    """Back-propagate through the 2-layer sigmoid network.

    With binary cross-entropy loss the output-layer error simplifies to
    a2 - y.  NOTE(review): this reads the training matrix `x` from
    module scope instead of taking it as a parameter -- kept as-is so
    existing callers keep working, but consider passing x explicitly.

    Returns (dz2, dw2, dz1, dw1): the layer errors and the weight
    gradients averaged over the m training examples.
    """
    dz2 = a2 - y                     # output-layer error (BCE + sigmoid)
    dw2 = np.dot(dz2, a1.T) / m      # gradient of w2
    # Hidden-layer error: propagate dz2 back through w2 and apply the
    # sigmoid derivative a1 * (1 - a1).  This line was missing in the
    # original program -- dz1 was used before being defined.
    dz1 = np.dot(w2.T, dz2) * a1 * (1 - a1)
    dw1 = np.dot(dz1, x.T) / m       # gradient of w1 (x is the global input)
    dw1 = np.reshape(dw1, w1.shape)
    dw2 = np.reshape(dw2, w2.shape)
    return dz2, dw2, dz1, dw1
# Train with full-batch gradient descent for a fixed number of epochs.
iterations = 10000
for i in range(iterations):
    z1, a1, z2, a2 = forward_prop(w1, w2, x)
    # Binary cross-entropy loss, averaged over the m training examples.
    loss = -(1 / m) * np.sum(y * np.log(a2) + (1 - y) * np.log(1 - a2))
    losses.append(loss)
    dz2, dw2, dz1, dw1 = back_prop(m, w1, w2, z1, a1, z2, a2, y)
    # Gradient-descent weight update.
    w2 = w2 - lr * dw2
    w1 = w1 - lr * dw1
# Plot the training curve.  The original labelled the axes but never
# called plt.plot/plt.show, so nothing was ever drawn.
plt.plot(losses)
plt.xlabel("EPOCHS")
plt.ylabel("Loss value")
plt.show()
def predict(w1, w2, input):
    """Print the network's binary XOR prediction for one input column.

    `input` is a 2x1 column vector; the output activation is thresholded
    at 0.5.  Fixes the original, which ran forward_prop on the global
    `test` instead of the parameter and had an empty else-branch, so the
    "output is 0" cases were never printed.  (The parameter name `input`
    shadows the builtin but is kept for caller compatibility.)
    """
    _, _, _, a2 = forward_prop(w1, w2, input)
    a2 = np.squeeze(a2)
    label = 1 if a2 >= 0.5 else 0
    print("For input", [i[0] for i in input], "output is", label)
# Evaluate the trained network on all four XOR input combinations,
# in the same order as the original script.
for pair in ([[1], [0]], [[0], [0]], [[0], [1]], [[1], [1]]):
    test = np.array(pair)  # keep assigning the module-level name `test`
    predict(w1, w2, test)
Output:
For input [1, 0] output is 1
For input [0, 0] output is 0
For input [0, 1] output is 1
For input [1, 1] output is 0