3.2- Based on the distance values, sort the rows in ascending order.
3.3- Next, choose the top K rows from the sorted array.
3.4- Assign a class to the test point based on the most frequent class
among these rows, as in the sketch below.
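A minimal NumPy sketch of these steps, assuming Euclidean distance; the names train_X, train_y, test_point, and k are illustrative, not from the original:

import numpy as np
from collections import Counter

def knn_classify(train_X, train_y, test_point, k):
    # 3.2: compute the distance to every training row and sort ascending
    distances = np.linalg.norm(train_X - test_point, axis=1)
    order = np.argsort(distances)
    # 3.3: choose the top K rows from the sorted array
    top_k_labels = [train_y[i] for i in order[:k]]
    # 3.4: assign the most frequent class among these rows
    return Counter(top_k_labels).most_common(1)[0][0]

# Illustrative data: two classes in 2-D
train_X = np.array([[1, 1], [1, 2], [5, 5], [6, 5]])
train_y = ["A", "A", "B", "B"]
print(knn_classify(train_X, train_y, np.array([5, 6]), k=3))  # -> B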
Chapter-8
1. About Neural Networks
• A neural network is a highly connected web of individual computing
elements that collectively can be used to solve interesting and
difficult problems.
• Once trained, neural networks can generalize to solve different problems that
have similar characteristics.
• The processing element of a neural network is modeled after a
biological neuron.
❑ Dendrites: receive information from other neurons and perform
fundamental processing in our brain.
❑ Axon: carries the neuron's message to other body cells.
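In the artificial model, the dendrites correspond to weighted inputs and the axon to the single output. A minimal sketch of that mapping (the weights and inputs below are illustrative):

import numpy as np

inputs = np.array([0.5, 0.3, 0.2])    # signals arriving at the "dendrites"
weights = np.array([0.4, 0.7, -0.2])  # connection strengths
summation = np.dot(weights, inputs)   # the cell body integrates the signals
output = 1 if summation > 0 else -1   # the "axon" carries the message onward
print(summation, output)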
2. randomperceptron.py
import numpy as np

# Reconstructed setup (the original definitions were cut off): AND-gate
# training data with bipolar targets, a step activation, and random weights.
a = [0, 0, 1, 1]
b = [0, 1, 0, 1]
y = [-1, -1, -1, 1]

def activation(summation, threshold):
    # Step activation: fire (1) if the weighted sum exceeds the threshold
    return 1 if summation > threshold else -1

weight = np.random.rand(2, 1)
weight = weight.flatten()
print(weight)
threshold = 1
learning_rate = 0.1
i = 0
print("Perceptron Training:")
print("#################")
while i < 4:
    summation = a[i] * weight[0] + b[i] * weight[1]
    o = activation(summation, threshold)
    print("Input: " + str(a[i]) + "," + str(b[i]))
    print("Weight: " + str(weight[0]) + "," + str(weight[1]))
    print("Summation: " + str(summation) + " Threshold: " + str(threshold))
    print("Actual output: " + str(y[i]) + " Predicted output: " + str(o))
    if o != y[i]:
        print("\nUpdating weights")
        weight[0] = weight[0] + learning_rate * y[i] * a[i]
        weight[1] = weight[1] + learning_rate * y[i] * b[i]
        print("Updated weights: " + str(weight[0]) + " , " + str(weight[1]))
        i = -1  # restart the pass from the first sample
        print("\nWeights updated, training again:")
    i = i + 1
    print("--------------------")

def perceptron(and_input):
    # Classify a single input pair with the trained weights
    summation = and_input[0] * weight[0] + and_input[1] * weight[1]
    return activation(summation, threshold)

and_input = [1, 0]
print("AND Gate output for " + str(and_input) + " : " + str(perceptron(and_input)))
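The rule weight = weight + learning_rate * y * x nudges each weight toward the correct sign whenever a prediction is wrong, and resetting i to -1 restarts the pass from the first sample, so the loop only ends once all four inputs are classified correctly in a single sweep.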
3.backprpagation.py
import pandas as pd
import numpy as np

df = pd.read_csv(r"D:\AI\Iris.csv")  # raw string so the backslashes are not escapes
print(df.head(100))

# One-hot encode the target species; drop Id and Species from the features
y = pd.get_dummies(df.Species).values
X = df.drop(["Id", "Species"], axis=1).values
print(X)
print(y)

def sigmoid(X):
    return 1 / (1 + np.exp(-X))

learning_rate = 0.000001
N = y.size
n_input = 4
n_hidden = 2
n_output = 3
np.random.seed(10)
weights_1 = np.random.normal(scale=0.5, size=(n_input, n_hidden))   # (4, 2)
weights_2 = np.random.normal(scale=0.5, size=(n_hidden, n_output))  # (2, 3)

# Forward pass
hidden_layer_inputs = np.dot(X, weights_1)
hidden_layer_outputs = sigmoid(hidden_layer_inputs)
output_layer_inputs = np.dot(hidden_layer_outputs, weights_2)
output_layer_outputs = sigmoid(output_layer_inputs)

# Backpropagation: scale each layer's error by the sigmoid derivative o*(1-o)
output_layer_error = output_layer_outputs - y
output_layer_error_delta = output_layer_error * output_layer_outputs * (1 - output_layer_outputs)
hidden_layer_error = np.dot(output_layer_error_delta, weights_2.T)
hidden_layer_error_delta = hidden_layer_error * hidden_layer_outputs * (1 - hidden_layer_outputs)

# Weight update: subtract, since the error is defined as output minus target
weights_2_update = np.dot(hidden_layer_outputs.T, output_layer_error_delta)
weights_1_update = np.dot(X.T, hidden_layer_error_delta)
weights_2 = weights_2 - learning_rate * weights_2_update
weights_1 = weights_1 - learning_rate * weights_1_update

mse = ((output_layer_outputs - y)**2).sum() / N
print(mse)
print(weights_2)
print(weights_1)
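Note that this script performs only a single forward and backward pass. To actually train the network, the forward-pass, backpropagation, and weight-update steps would be wrapped in a loop over many epochs, with the MSE printed each iteration to confirm it is decreasing.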
4. leastmeansquare.py
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Five feature values and their target values
X_data = np.array([160, 171, 182, 180, 154], ndmin=2)
print(X_data)
X_data = X_data.reshape((5, 1))  # sklearn expects one sample per row
print(X_data)
y_data = np.array([72, 76, 77, 83, 76])
print(y_data)

model = LinearRegression()  # fits the least-squares line
model.fit(X_data, y_data)
print(model.predict([[176]]))
y_pred = model.predict(X_data)
print("prediction value", y_pred)

# Mean squared error by hand, then via sklearn
squared_errors = (y_data - y_pred)**2
mse = np.mean(squared_errors)
print(mse)
print(mean_squared_error(y_data, y_pred))
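Both printed MSE values should agree: mean_squared_error simply computes the mean of the same squared residuals calculated by hand on the previous lines, which serves as a quick sanity check on the manual computation.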
Chapter-9
1. hebb1.py
learning_rate = 0.2
total_cell = 16
activation_weight = total_cell

def initialize():
    # Reconstructed (assumed) setup, missing from the original:
    # zeroed global input, output, and weight arrays
    global inputs, outputs, weights
    inputs = [0.0] * total_cell
    outputs = [0.0] * total_cell
    weights = [[0.0] * activation_weight for _ in range(total_cell)]

def compute_activations():
    for out in range(total_cell):
        # outputs[out] = sum(weights[out][weight] * inputs[weight])
        outputs[out] = 0
        for weight in range(activation_weight):
            outputs[out] += weights[out][weight] * inputs[weight]
        outputs[out] = 1.0 if outputs[out] > 0.0 else -1.0
    print(outputs)

def define_pattern(inp):
    for i in range(total_cell):
        inputs[i] = inp[i]
        outputs[i] = float(inp[i])

def adjust_weights():
    # Hebbian rule: strengthen weights between co-active cells
    for out in range(total_cell):
        for weight in range(activation_weight):
            weights[out][weight] += learning_rate * (outputs[out] * inputs[weight])

def fill_pattern(x):
    return ' ' if x > 0.0 else '*'

def show_activations():
    compute_activations()
    print()
    for i in range(0, total_cell, 4):
        print(
            f"{fill_pattern(inputs[i+0])} {fill_pattern(inputs[i+1])} "
            f"{fill_pattern(inputs[i+2])} {fill_pattern(inputs[i+3])} -- "
            f"{fill_pattern(outputs[i+0])} {fill_pattern(outputs[i+1])} "
            f"{fill_pattern(outputs[i+2])} {fill_pattern(outputs[i+3])}"
        )

def main():
    initialize()
    # Specify patterns and train
    patterns = [
        [-1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1],  # box pattern
        [-1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1]       # I pattern
    ]
    # Assumed training/recall flow (the original body stopped here):
    # train on each pattern, then display the recalled activations
    for pattern in patterns:
        define_pattern(pattern)
        adjust_weights()
    for pattern in patterns:
        define_pattern(pattern)
        show_activations()

if __name__ == "__main__":
    main()
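adjust_weights() implements the Hebbian rule Δw = learning_rate × output × input: connections between cells that fire together are strengthened, which is what later lets compute_activations() reproduce a stored pattern from its inputs.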
Chapter-10
1.