02_ML_PDF
import numpy as np
np.random.seed(0)
#
# Back propagation algorithm implementation
#
# From
# https://medium.com/@jayeshbahire/the-xor-problem-in-neural-networks-50006411840b
#
# The back propagation algorithm begins by comparing the actual value output
# by the forward propagation process to the expected value, and then moves
# backward through the network, slightly adjusting each of the weights in a
# direction that reduces the size of the error by a small degree.
#
# Both forward and back propagation are re-run thousands of times on each
# input combination until the network can accurately predict the expected
# output of the possible inputs using forward propagation.
#
# For the XOR problem, 100% of the possible data examples are available to
# use in the training process. We can therefore expect the trained network
# to be 100% accurate in its predictions, and there is no need to be
# concerned with issues such as bias and variance in the resulting model.
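#
# A tiny standalone illustration of that "adjust each weight against the
# error" idea: one gradient-descent step on a single weight w for the
# squared error (w * x - t) ** 2. The toy values below are arbitrary, not
# part of the network that follows.
w_demo, x_demo, t_demo = 0.5, 1.0, 1.0
grad_demo = 2 * (w_demo * x_demo - t_demo) * x_demo   # d(error)/dw = -1.0
w_demo = w_demo - 0.1 * grad_demo                     # w: 0.5 -> 0.6, error: 0.25 -> 0.16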
#
class OneHiddenLayerNetwork:

    #
    # activation functions
    #
    @staticmethod
    def tang(y):
        return np.tanh(y)

    @staticmethod
    def derivative_tang(y):
        return 1.0 - y ** 2

    @staticmethod
    def sigmoid(y):
        return 1 / (1 + np.exp(-y))

    @staticmethod
    def derivative_sigmoid(y):
        return y * (1 - y)
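
    # note: derivative_tang and derivative_sigmoid take the activation's
    # *output*, not its input. A quick sanity-check sketch with made-up
    # numbers: for a = np.tanh(0.3), derivative_tang(a) == 1 - a ** 2
    # ~= 0.9151, which matches (np.tanh(0.3 + 1e-6) - a) / 1e-6.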
    #
    # neural network architecture
    # simple 2 x 2 x 1 that is enough for the XOR example
    # input x hidden x output
    #
    def __init__(self, learning_rate=0.1):
        self.learning_rate = learning_rate
        self.output = None
        self.layers = None
        # random initial weights: 2x2 input->hidden, 2x1 hidden->output
        # (the ranges here are one reasonable small-value choice)
        self.weights = [
            np.random.uniform(low=-0.2, high=0.2, size=(2, 2)),
            np.random.uniform(low=-2.0, high=2.0, size=(2, 1)),
        ]

    #
    # look up the activation function / its derivative by name
    #
    def activation(self, name, y):
        if name == 'tang':
            return self.tang(y)
        if name == 'sigmoid':
            return self.sigmoid(y)
        raise ValueError('unknown activation: ' + str(name))

    def derivative_activation(self, name, y):
        if name == 'tang':
            return self.derivative_tang(y)
        if name == 'sigmoid':
            return self.derivative_sigmoid(y)
        raise ValueError('unknown activation: ' + str(name))
    #
    # forward pass
    # layer by layer
    #
    def feed_forward_pass(self, x_values):
        # forward: input -> hidden -> output, tanh at each layer
        input_layer = x_values
        hidden_layer = self.activation('tang', np.dot(input_layer, self.weights[0]))
        output_layer = self.activation('tang', np.dot(hidden_layer, self.weights[1]))
        # keep every layer's output: the backward pass reuses them
        self.layers = [
            input_layer,
            hidden_layer,
            output_layer
        ]
        self.output = output_layer
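
    # shape sketch for a full XOR batch (X being the 4x2 truth table):
    #   input (4, 2) . weights[0] (2, 2) -> hidden (4, 2)
    #   hidden (4, 2) . weights[1] (2, 1) -> output (4, 1)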
    #
    # back propagation of the error through the network layers
    #
    def backward_pass(self, target_output, actual_output):
        # delta at the output: error times the activation's slope, written
        # in terms of the output (for tanh, d/dx tanh = 1 - tanh ** 2)
        error = target_output - actual_output
        output_delta = error * self.derivative_activation('tang', actual_output)
        # push the delta back through the hidden->output weights
        hidden_error = output_delta.dot(self.weights[1].T)
        hidden_delta = hidden_error * self.derivative_activation('tang', self.layers[1])
        # gradient step on both weight matrices
        self.weights[1] += self.learning_rate * self.layers[1].T.dot(output_delta)
        self.weights[0] += self.learning_rate * self.layers[0].T.dot(hidden_delta)

    def train(self, x_values, target_output):
        # one training step: forward pass, then weight update
        self.feed_forward_pass(x_values)
        self.backward_pass(target_output, self.output)

    def predict(self, x_values):
        # forward pass only
        self.feed_forward_pass(x_values)
        return self.output


#
# XOR truth table: every possible input pair and its target
#
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
network = OneHiddenLayerNetwork(learning_rate=0.1)
iterations = 5000

# training
ten = iterations // 10
for i in range(iterations):
    network.train(X, y)
    if i % ten == 0:
        print('-' * 10)
        print("Iteration number: " + str(i) + ' / ' +
              "Squared loss: " + str(np.mean(np.square(y - network.output))))
# predict
for i in range(len(X)):
    print('-' * 10)
    print('Input value: ' + str(X[i]))
    print('Predicted target: ' + str(network.predict(X[i])))
    print('Actual target: ' + str(y[i]))
----------
Iteration number: 0 / Squared loss: 0.4799042135840691
----------
Iteration number: 500 / Squared loss: 0.042950494381573286
----------
Iteration number: 1000 / Squared loss: 0.013212838667124875
----------
Iteration number: 1500 / Squared loss: 0.007013232507805744
----------
Iteration number: 2000 / Squared loss: 0.004629345031713968
----------
Iteration number: 2500 / Squared loss: 0.003412401497355086
----------
Iteration number: 3000 / Squared loss: 0.002685484896745155
----------
Iteration number: 3500 / Squared loss: 0.0022061432569869077
----------
Iteration number: 4000 / Squared loss: 0.0018679274404916828
----------
Iteration number: 4500 / Squared loss: 0.0016172802953129996
----------
Input value: [0. 0.]
Predicted target: [0.]
Actual target: [0.]
----------
Input value: [0. 1.]
Predicted target: [0.9470363]
Actual target: [1.]
----------
Input value: [1. 0.]
Predicted target: [0.94703627]
Actual target: [1.]
----------
Input value: [1. 1.]
Predicted target: [0.00936281]
Actual target: [0.]
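
#
# Follow-up sketch: the trained outputs land near, not exactly at, 0 and 1,
# so a 0.5 cutoff (an assumed threshold) turns them into crisp XOR answers.
for i in range(len(X)):
    print(str(X[i]) + ' -> ' + str(int(network.predict(X[i])[0] > 0.5)))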