ai int-1
# Find-S
-------------------------------------------------------------------
def FindS(data):
    # Find-S: the most specific hypothesis consistent with the positive examples
    hypothesis = None
    for row in data:
        attributes = row[:-1]
        label = row[-1]
        if label == 'Yes':  # Find-S ignores negative examples
            if hypothesis is None:
                hypothesis = attributes.copy()  # start from the first positive example
            else:
                for i in range(len(hypothesis)):
                    if hypothesis[i] != attributes[i]:
                        hypothesis[i] = '?'  # generalize attributes that disagree
    return hypothesis
data = [
['Sunny', 'Warm', 'Normal', 'Strong', 'Warm', 'Same', 'Yes'],
['Sunny', 'Warm', 'High', 'Strong', 'Warm', 'Same', 'Yes'],
['Rainy', 'Cold', 'High', 'Strong', 'Warm', 'Change', 'No'],
['Sunny', 'Warm', 'High', 'Strong', 'Cool', 'Change', 'Yes']
]
print("Most specific hypothesis:",FindS(data))
-------------------------------------------------------------------
# KNN
-------------------------------------------------------------------
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
data=pd.read_csv('iris.csv')
X=data.iloc[:,:-1]
y=data.iloc[:,-1]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)
knn=KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train,y_train)
print("Model built with accuracy:",knn.score(X_test,y_test))
-------------------------------------------------------------------
# Simple Linear Regression
-------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import warnings
warnings.filterwarnings("ignore")
data=pd.read_csv('Salary_Data.csv')
X=data.iloc[:,0].values
y=data.iloc[:,1].values
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
model=LinearRegression()
model.fit(X_train.reshape(-1,1),y_train)
print("Model built with accuracy:",model.score(X_test.reshape(-1,1),y_test))
plt.scatter(X_train,y_train)
x1,x2=X_train.min(),X_train.max()
y1,y2=model.predict([[x1],[x2]])
plt.plot([x1,x2],[y1,y2])
plt.show()
ip=input("Enter X:")
print("Prediction is:",model.predict([[float(ip)]])[0])
-------------------------------------------------------------------
# Locally Weighted Regression
-------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
class LocallyWeightedRegression:
    def __init__(self, tau=0.1):
        self.tau = tau

    def kernel(self, query_point, X):
        diff = X - query_point
        return np.diag(np.exp(-np.sum(diff**2, axis=1) / (2 * self.tau ** 2)))

    def predict(self, X, Y, query_point):
        X_aug = np.hstack([X, np.ones((X.shape[0], 1))])
        W = self.kernel(query_point, X)
        theta = np.linalg.pinv(X_aug.T @ W @ X_aug) @ (X_aug.T @ W @ Y)
        return np.dot(np.hstack([query_point, 1]), theta)

    def fit_and_predict(self, X, Y, X_test):
        return np.array([self.predict(X, Y, x) for x in X_test])

    def plot(self, X, Y, Y_pred, X_test):
        plt.scatter(X, Y, color='red', label='Training data')
        plt.plot(X_test, Y_pred, color='green', label='Prediction')
        plt.legend()
        plt.show()
X=np.array([1, 2, 3, 4, 5]).reshape(-1, 1)
Y=np.array([1, 2, 1.3, 3.75, 2.25]).reshape(-1, 1)
X_test=np.linspace(1, 5, 100).reshape(-1, 1)
model=LocallyWeightedRegression(tau=0.5)
Y_pred=model.fit_and_predict(X, Y, X_test)
model.plot(X, Y, Y_pred, X_test)
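# Each training point x_i receives the Gaussian weight
# w_i = exp(-||x_i - x_q||^2 / (2*tau^2)), so nearby points dominate the fit;
# e.g. the weights the kernel assigns for the query x_q = 3:
print(np.diag(model.kernel(np.array([3.0]), X)))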
-------------------------------------------------------------------
# Decision Tree
-------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier,plot_tree
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
data=pd.read_csv("iris.csv")
X=data.iloc[:,:-1]
y=data.iloc[:,-1]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3)
model=DecisionTreeClassifier()
model.fit(X_train,y_train)
print(model.score(X_test,y_test))
plot_tree(model,filled=True,rounded=True)
plt.show()
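# The fitted tree also reports each feature's contribution to the splits:
print("Feature importances:",model.feature_importances_)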
-------------------------------------------------------------------
# Decision Tree without Built-In
-------------------------------------------------------------------
import numpy as np

class Node:
    def __init__(self, data, target):
        self.data = data
        self.target = target
        self.left = None
        self.right = None
        self.feature_index = None  # split feature (internal nodes)
        self.threshold = None      # split threshold (internal nodes)
        self.result = None         # predicted class (leaf nodes)

def calculate_entropy(y):
    unique_classes, class_counts = np.unique(y, return_counts=True)
    probabilities = class_counts / len(y)
    return -np.sum(probabilities * np.log2(probabilities))

def information_gain(y, left_y, right_y):
    total_entropy = calculate_entropy(y)
    left_entropy = calculate_entropy(left_y)
    right_entropy = calculate_entropy(right_y)
    weighted = (len(left_y) * left_entropy + len(right_y) * right_entropy) / len(y)
    return total_entropy - weighted

def build_tree(X, y):
    n_features = X.shape[1]
    # No features or a pure node: return a majority-class leaf
    if n_features == 0 or len(np.unique(y)) == 1:
        leaf_node = Node(data=X, target=y)
        leaf_node.result = np.argmax(np.bincount(y))
        return leaf_node
    # Try every (feature, threshold) split and keep the best information gain
    best_information_gain = 0
    best_feature_index, best_threshold = None, None
    for feature_index in range(n_features):
        for threshold in np.unique(X[:, feature_index]):
            left_mask = X[:, feature_index] <= threshold
            if left_mask.all() or not left_mask.any():
                continue  # skip splits that leave one side empty
            gain = information_gain(y, y[left_mask], y[~left_mask])
            if gain > best_information_gain:
                best_information_gain = gain
                best_feature_index, best_threshold = feature_index, threshold
    # No split improves purity: return a majority-class leaf
    if best_information_gain == 0:
        leaf_node = Node(data=X, target=y)
        leaf_node.result = np.argmax(np.bincount(y))
        return leaf_node
    root = Node(data=X, target=y)
    root.feature_index = best_feature_index
    root.threshold = best_threshold
    left_mask = X[:, best_feature_index] <= best_threshold
    root.left = build_tree(X[left_mask], y[left_mask])
    root.right = build_tree(X[~left_mask], y[~left_mask])
    return root

def predict(node, x):
    # Follow split tests from the root down to a leaf
    while node.result is None:
        node = node.left if x[node.feature_index] <= node.threshold else node.right
    return node.result

if __name__ == "__main__":
    from sklearn.datasets import load_iris
    iris = load_iris()
    X = iris.data
    y = iris.target
    tree = build_tree(X, y)
    prediction = predict(tree, X[0])
    if prediction == 0:
        print("Setosa")
    elif prediction == 1:
        print("Versicolor")
    elif prediction == 2:
        print("Virginica")
-------------------------------------------------------------------
# Perceptron for AND, OR
-------------------------------------------------------------------
import numpy as np
class Perceptron(object):
    def __init__(self, input_size, lr=1, epochs=100):
        self.W = np.zeros(input_size+1)  # W[0] is the bias weight
        self.epochs = epochs
        self.lr = lr

    def activation_fn(self, x):
        return 1 if x >= 0 else 0  # unit step

    def predict(self, x):
        z = self.W.T.dot(x)
        a = self.activation_fn(z)
        return a

    def fit(self, X, d):
        for _ in range(self.epochs):
            for i in range(d.shape[0]):
                x = np.insert(X[i], 0, 1)  # prepend 1 for the bias term
                y = self.predict(x)
                e = d[i] - y
                self.W = self.W + self.lr * e * x  # perceptron update rule
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
d=np.array([0, 1, 1, 1])
perceptron=Perceptron(input_size=2)
perceptron.fit(X,d)
print("Perceptron weights for OR:",perceptron.W)
-------------------------------------------------------------------
# Perceptron Training
-------------------------------------------------------------------
import numpy as np
class Perceptron(object):
    def __init__(self, input_size, lr=1, epochs=100):
        self.W = np.zeros(input_size+1)  # W[0] is the bias weight
        self.epochs = epochs
        self.lr = lr

    def predict(self, row, weights=None):
        # row's last element is the class label and is not used here
        if weights is None:
            weights = self.W
        activation = weights[0]
        for i in range(len(row)-1):
            activation += weights[i + 1] * row[i]
        return 1.0 if activation >= 0.0 else 0.0

    def fit(self, X, d):
        train = np.append(X, d, axis=1)  # attach labels as the last column
        weights = [0.0 for i in range(len(train[0]))]
        for epoch in range(self.epochs):
            print(weights)
            sum_error = 0.0
            for row in train:
                prediction = self.predict(row, weights)
                error = row[-1] - prediction
                sum_error += error**2
                weights[0] = weights[0] + self.lr * error  # bias update
                for i in range(len(row)-1):
                    weights[i + 1] = weights[i + 1] + self.lr * error * row[i]
            print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, self.lr, sum_error))
        self.W = weights.copy()
        return weights
X = np.array([[2.7810836,2.550537003],
[1.465489372,2.362125076],
[3.396561688,4.400293529],
[1.38807019,1.850220317],
[3.06407232,3.005305973],
[7.627531214,2.759262235],
[5.332441248,2.088626775],
[6.922596716,1.77106367],
[8.675418651,-0.242068655],
[7.673756466,3.508563011]])
d = np.array([[0],[0],[0],[0],[0],[1],[1],[1],[1],[1]])
perceptron = Perceptron(input_size=2,lr=0.1,epochs=5)
perceptron.fit(X, d)
print("Final weights:",perceptron.W)