ai int-1

The document contains various implementations of machine learning algorithms including Find-S, KNN, Simple Linear Regression, Locally Weighted Regression, Decision Trees, and Perceptron models. Each section provides code examples for training and predicting outcomes using these algorithms on datasets like iris and salary data. Additionally, there are custom implementations of decision trees and perceptrons without relying on built-in libraries.


-------------------------------------------------------------------

# Find-S
-------------------------------------------------------------------
def FindS(data):
    # Start with no hypothesis; initialize it from the first positive example.
    hypothesis = None
    for row in data:
        attributes = row[:-1]
        label = row[-1]
        if label == 'Yes':
            if hypothesis is None:
                hypothesis = attributes.copy()
            else:
                # Generalize any attribute that disagrees with the example to '?'.
                for i in range(len(hypothesis)):
                    if hypothesis[i] != attributes[i]:
                        hypothesis[i] = '?'
    return hypothesis
data = [
['Sunny', 'Warm', 'Normal', 'Strong', 'Warm', 'Same', 'Yes'],
['Sunny', 'Warm', 'High', 'Strong', 'Warm', 'Same', 'Yes'],
['Rainy', 'Cold', 'High', 'Strong', 'Warm', 'Change', 'No'],
['Sunny', 'Warm', 'High', 'Strong', 'Cool', 'Change', 'Yes']
]
print("Most specific hypothesis:",FindS(data))

-------------------------------------------------------------------
#KNN
-------------------------------------------------------------------
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")

data=pd.read_csv('iris.csv')
X=data.iloc[:,:-1]
y=data.iloc[:,-1]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)
knn=KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train,y_train)
print("Model built with accuracy:",knn.score(X_test,y_test))

l=["Sepal Length","Sepal Width","Petal Length","Petal Width"]

point=[]
for i in l:
    v=float(input("Enter "+i+" : "))  # prompt with the feature name
    point.append(v)
print("The predicted class is: ",knn.predict([point])[0])

-------------------------------------------------------------------
#Simple Linear Regression
-------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import warnings
warnings.filterwarnings("ignore")
data=pd.read_csv('Salary_Data.csv')
X=data.iloc[:,0].values
y=data.iloc[:,1].values
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
model=LinearRegression()
model.fit(X_train.reshape(-1,1),y_train)
print("Model built with accuracy:",model.score(X_test.reshape(-1,1),y_test))
plt.scatter(X_train,y_train)
x1,x2=X_train.min(),X_train.max()
y1,y2=model.predict([[x1],[x2]])
plt.plot([x1,x2],[y1,y2])
plt.show()
ip=input("Enter X:")
print("Prediction is:",model.predict([[float(ip)]])[0])

-------------------------------------------------------------------
#Locally Weighted Regression
-------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt

class LocallyWeightedRegression:
    def __init__(self, tau=0.1):
        self.tau = tau  # bandwidth: smaller tau => more local fit
    def kernel(self, query_point, X):
        # Diagonal weight matrix: a Gaussian weight for each training point.
        diff = X - query_point
        return np.diag(np.exp(-np.sum(diff**2, axis=1) / (2 * self.tau ** 2)))
    def predict(self, X, Y, query_point):
        # Augment with a bias column and solve the weighted normal equations.
        X_aug = np.hstack([X, np.ones((X.shape[0], 1))])
        W = self.kernel(query_point, X)
        theta = np.linalg.pinv(X_aug.T @ W @ X_aug) @ (X_aug.T @ W @ Y)
        return np.dot(np.hstack([query_point, 1]), theta)
    def fit_and_predict(self, X, Y, X_test):
        return np.array([self.predict(X, Y, x) for x in X_test])
    def plot(self, X, Y, Y_pred, X_test):
        plt.scatter(X, Y, color='red', label='Training data')
        plt.plot(X_test, Y_pred, color='green', label='Prediction')
        plt.legend()
        plt.show()
X=np.array([1, 2, 3, 4, 5]).reshape(-1, 1)
Y=np.array([1, 2, 1.3, 3.75, 2.25]).reshape(-1, 1)
X_test=np.linspace(1, 5, 100).reshape(-1, 1)
model=LocallyWeightedRegression(tau=0.5)
Y_pred=model.fit_and_predict(X, Y, X_test)
model.plot(X, Y, Y_pred, X_test)
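
# Each training point is weighted by w_i = exp(-||x_i - x_q||^2 / (2*tau^2)),
# so points near the query dominate the weighted least-squares fit; a larger
# tau (e.g. tau=1.0) smooths the curve toward an ordinary global linear fit.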

-------------------------------------------------------------------
#Decision Tree
-------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier,plot_tree
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
data=pd.read_csv("iris.csv")
X=data.iloc[:,:-1]
y=data.iloc[:,-1]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)
model=DecisionTreeClassifier()
model.fit(X_train,y_train)
print("Model built with accuracy:",model.score(X_test,y_test))
plot_tree(model,filled=True,rounded=True)
plt.show()
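
# For a more readable plot, plot_tree also accepts labels (assuming iris.csv
# has named feature columns), e.g.:
# plot_tree(model,feature_names=list(X.columns),class_names=[str(c) for c in model.classes_],filled=True,rounded=True)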

-------------------------------------------------------------------
#Decision Tree without Built-In
-------------------------------------------------------------------
import numpy as np

class Node:
    def __init__(self, data, target):
        self.data = data
        self.target = target
        self.left = None
        self.right = None
        self.feature_index = None
        self.threshold = None
        self.result = None

def calculate_entropy(y):
    # Shannon entropy of the class distribution.
    unique_classes, class_counts = np.unique(y, return_counts=True)
    probabilities = class_counts / len(y)
    return -np.sum(probabilities * np.log2(probabilities))

def calculate_information_gain(X, y, feature_index, threshold):
    # Split on feature <= threshold and measure the entropy reduction.
    left_indices = X[:, feature_index] <= threshold
    right_indices = ~left_indices
    left_y = y[left_indices]
    right_y = y[right_indices]

    total_entropy = calculate_entropy(y)
    left_entropy = calculate_entropy(left_y)
    right_entropy = calculate_entropy(right_y)

    p_left = len(left_y) / len(y)
    p_right = len(right_y) / len(y)

    return total_entropy - (p_left * left_entropy + p_right * right_entropy)

def build_tree(X, y, depth=0, max_depth=None):
    n_samples, n_features = X.shape
    unique_classes = np.unique(y)

    # Pure node or depth limit reached: make a leaf.
    if len(unique_classes) == 1 or (max_depth is not None and depth >= max_depth):
        leaf_node = Node(data=X, target=y)
        # Majority class (equals the single class when the node is pure).
        leaf_node.result = np.argmax(np.bincount(y))
        return leaf_node

    if n_features == 0:
        leaf_node = Node(data=X, target=y)
        leaf_node.result = np.argmax(np.bincount(y))
        return leaf_node

    # Search every (feature, threshold) pair for the best information gain.
    best_feature_index, best_threshold, best_information_gain = None, None, -1

    for feature_index in range(n_features):
        unique_thresholds = np.unique(X[:, feature_index])
        for threshold in unique_thresholds:
            information_gain = calculate_information_gain(X, y, feature_index, threshold)
            if information_gain > best_information_gain:
                best_feature_index, best_threshold, best_information_gain = feature_index, threshold, information_gain

    # No split improves entropy: fall back to the majority class.
    if best_information_gain == 0:
        leaf_node = Node(data=X, target=y)
        leaf_node.result = np.argmax(np.bincount(y))
        return leaf_node

    left_indices = X[:, best_feature_index] <= best_threshold
    right_indices = ~left_indices

    left_subtree = build_tree(X[left_indices], y[left_indices], depth + 1, max_depth)
    right_subtree = build_tree(X[right_indices], y[right_indices], depth + 1, max_depth)

    root = Node(data=X, target=y)
    root.left = left_subtree
    root.right = right_subtree
    root.feature_index = best_feature_index
    root.threshold = best_threshold

    return root

def predict(node, X):
    # Walk down the tree until a leaf stores a result.
    if node.result is not None:
        return node.result
    if X[node.feature_index] <= node.threshold:
        return predict(node.left, X)
    else:
        return predict(node.right, X)

if __name__ == "__main__":
    from sklearn.datasets import load_iris

    iris = load_iris()
    X = iris.data
    y = iris.target

    tree = build_tree(X, y, max_depth=3)

    new_data_point = np.array([5.1, 3.5, 1.4, 0.2])
    prediction = predict(tree, new_data_point)
    print(f"Predicted class for input data: {prediction}")

    if prediction == 0:
        print("Setosa")
    elif prediction == 1:
        print("Versicolor")
    elif prediction == 2:
        print("Virginica")

-------------------------------------------------------------------
# Perceptron for AND,OR
-------------------------------------------------------------------
import numpy as np
class Perceptron(object):
    def __init__(self, input_size, lr=1, epochs=100):
        self.W = np.zeros(input_size+1)  # +1 for the bias weight
        self.epochs = epochs
        self.lr = lr
    def activation_fn(self, x):
        # Step activation.
        return 1 if x >= 0 else 0
    def predict(self, x):
        z = self.W.T.dot(x)
        a = self.activation_fn(z)
        return a
    def fit(self, X, d):
        # Perceptron learning rule: W <- W + lr * (target - prediction) * x.
        for _ in range(self.epochs):
            for i in range(d.shape[0]):
                x = np.insert(X[i], 0, 1)  # prepend the bias input
                y = self.predict(x)
                e = d[i] - y
                self.W = self.W + self.lr * e * x
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
d=np.array([0, 1, 1, 1])
perceptron=Perceptron(input_size=2)
perceptron.fit(X,d)
print("Perceptron weights for OR:",perceptron.W)

-------------------------------------------------------------------
#Perceptron Training
-------------------------------------------------------------------
import numpy as np
class Perceptron(object):
    def __init__(self, input_size, lr=1, epochs=100):
        self.W = np.zeros(input_size+1)
        self.epochs = epochs
        self.lr = lr
    def predict(self, row, weights=None):
        if weights is None:
            weights = self.W
        # weights[0] is the bias; row may carry the label in its last slot,
        # so only the feature positions are used.
        activation = weights[0]
        for i in range(len(weights) - 1):
            activation += weights[i + 1] * row[i]
        return 1.0 if activation >= 0.0 else 0.0
    def fit(self, X, d):
        # Append the targets as the last column of the training rows.
        train = np.append(X, d, axis=1)
        weights = [0.0 for i in range(len(train[0]))]
        for epoch in range(self.epochs):
            print(weights)
            sum_error = 0.0
            for row in train:
                prediction = self.predict(row, weights)
                error = row[-1] - prediction
                sum_error += error**2
                # Update the bias and feature weights by the perceptron rule.
                weights[0] = weights[0] + self.lr * error
                for i in range(len(row)-1):
                    weights[i + 1] = weights[i + 1] + self.lr * error * row[i]
            print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, self.lr, sum_error))
        self.W = weights.copy()
        return weights
X = np.array([[2.7810836,2.550537003],
[1.465489372,2.362125076],
[3.396561688,4.400293529],
[1.38807019,1.850220317],
[3.06407232,3.005305973],
[7.627531214,2.759262235],
[5.332441248,2.088626775],
[6.922596716,1.77106367],
[8.675418651,-0.242068655],
[7.673756466,3.508563011]])
d = np.array([[0],[0],[0],[0],[0],[1],[1],[1],[1],[1]])
perceptron = Perceptron(input_size=2,lr=0.1,epochs=5)
perceptron.fit(X, d)
print("Final weights:",perceptron.W)
