Machine learning CODE
Linear Regression –
from scipy import stats
x = [5, 7, 8, 7, 2, 17, 2, 9, 4, 11, 12, 9, 6]
y = [99, 86, 87, 88, 111, 86, 103, 87, 94, 78, 77, 85, 86]
slope, intercept, r, p, std_err = stats.linregress(x, y)  # least-squares fit (linregress assumed; the original fitting call is missing)
def myfunc(x):
    return slope * x + intercept
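The fit can be checked visually by drawing the fitted line over the data; a minimal sketch, assuming matplotlib and the x, y, and myfunc defined above:
import matplotlib.pyplot as plt
mymodel = list(map(myfunc, x))   # predicted y for every x
plt.scatter(x, y)                # raw data points
plt.plot(x, mymodel)             # fitted regression line
plt.show()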
Support Vector Machine (SVM) –
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC

# Sample 2-D dataset (assumed; the original data-loading code is missing from the notes)
X = np.array([[2, 3], [1, 1], [2, 1], [3, 2], [6, 5], [7, 8], [8, 6], [7, 7]])
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])

svm = SVC(kernel='linear')
svm.fit(X, y)
w = svm.coef_[0]       # weight vector of the separating hyperplane
b = svm.intercept_[0]  # bias term

plt.scatter(X[:, 0], X[:, 1], c=y, cmap='bwr')
plt.scatter(svm.support_vectors_[:, 0],
            svm.support_vectors_[:, 1],
            facecolors='none',
            edgecolors='k', s=100, label='Support Vectors')
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.legend()
plt.grid(True)
plt.show()
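Since the kernel is linear, the weight vector w and bias b define the decision boundary w·x + b = 0; a minimal sketch of drawing it, assuming the w, b, and X from the block above (these lines go just before plt.show()):
x_vals = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
y_vals = -(w[0] * x_vals + b) / w[1]   # solve w[0]*x + w[1]*y + b = 0 for y
plt.plot(x_vals, y_vals, 'k--', label='Decision boundary')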
Hebbian Learning-
import numpy as np
import matplotlib.pyplot as plt

# Hebbian update rule: w = w + η * x * y
def hebbian_learning(inputs, outputs, learning_rate):
    num_inputs = inputs.shape[1]
    weights = np.zeros(num_inputs)
    for i in range(len(inputs)):
        weights += learning_rate * inputs[i] * outputs[i]
    return weights

# Bipolar training patterns (assumed; the original dataset is missing from the notes)
inputs = np.array([[1, -1],
                   [-1, 1],
                   [1, 1],
                   [-1, -1]])
outputs = np.array([1, -1, 1, -1])
learning_rate = 0.1

# Train the network using Hebbian learning
weights = hebbian_learning(inputs, outputs, learning_rate)
print("Final weights:", weights)

# Track how the weights evolve as each pattern is presented
iterations = np.arange(1, len(inputs) + 1)
weight_values = np.zeros((len(iterations), len(weights)))
for i in range(len(iterations)):
    weight_values[i] = hebbian_learning(inputs[:i+1], outputs[:i+1], learning_rate)

plt.plot(iterations, weight_values, marker='o')
plt.xlabel('Pattern Number')
plt.ylabel('Weight Value')
plt.grid(True)
plt.show()
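With bipolar patterns the trained weights can be checked by recalling the outputs; a minimal sketch, assuming the inputs, outputs, and weights from the block above (the data themselves are assumed):
recalled = np.sign(inputs @ weights)   # sign of the weighted sum for each pattern
print("Recalled outputs:", recalled)
print("Target outputs:  ", outputs)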
Expectation Maximization Algorithm-
import numpy as np
from scipy.stats import norm

def expectation_maximization(X, num_clusters=2, max_iters=100, tol=1e-6):
    np.random.seed(42)
    X = np.array(X, dtype=float)
    # Initialise means, variances and mixing coefficients
    mu = np.array([np.min(X), np.max(X)])
    sigma = np.full(num_clusters, np.var(X))      # variances
    pi = np.full(num_clusters, 1 / num_clusters)
    for iteration in range(max_iters):
        # Expectation Step: responsibility of each cluster for each point
        responsibilities = np.zeros((len(X), num_clusters))
        for j in range(num_clusters):
            # Gaussian pdf (scipy's norm.pdf used here; the original call was truncated)
            responsibilities[:, j] = pi[j] * norm.pdf(X, mu[j], np.sqrt(sigma[j]))
        responsibilities_sum = responsibilities.sum(axis=1, keepdims=True) + 1e-8
        responsibilities /= responsibilities_sum  # Normalize
        # Maximization Step: re-estimate parameters from the responsibilities
        N_k = responsibilities.sum(axis=0)
        new_mu = (responsibilities * X[:, None]).sum(axis=0) / N_k
        new_sigma = (responsibilities * (X[:, None] - new_mu) ** 2).sum(axis=0) / N_k
        pi = N_k / len(X)
        print(f"Means: {new_mu}")
        print(f"Variances: {new_sigma}")
        # Convergence Check
        if np.allclose(mu, new_mu, atol=tol) and np.allclose(sigma, new_sigma, atol=tol):
            print("Converged!\n")
            break
        mu, sigma = new_mu, new_sigma
    return responsibilities

# Given dataset (assumed sample values; the original data are missing from the notes)
X = [1.0, 1.2, 0.8, 5.0, 5.2, 4.8]

# Run EM algorithm
responsibilities_final = expectation_maximization(X)
print("Final Responsibilities:")
print(responsibilities_final)
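Hard cluster labels follow by assigning each point to the cluster with the largest responsibility; a minimal sketch, assuming responsibilities_final from the run above:
labels = np.argmax(responsibilities_final, axis=1)
print("Cluster assignments:", labels)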
McCulloch-Pitts model-
# McCulloch-Pitts neuron: output 1 when the weighted sum of inputs reaches the threshold
def mcculloch_pitts(inputs, weights, threshold):
    weighted_sum = sum(x * w for x, w in zip(inputs, weights))
    return 1 if weighted_sum >= threshold else 0

# Define gates (unit weights assumed; the original weight entries are missing)
AND_GATE = {
    "weights": [1, 1],
    "threshold": 2
}
OR_GATE = {
    "weights": [1, 1],
    "threshold": 1
}

# Test inputs
inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]

print("AND Gate:")
for x in inputs:
    output = mcculloch_pitts(x, AND_GATE["weights"], AND_GATE["threshold"])
    print(f"Input: {x}, Output: {output}")

print("\nOR Gate:")
for x in inputs:
    output = mcculloch_pitts(x, OR_GATE["weights"], OR_GATE["threshold"])
    print(f"Input: {x}, Output: {output}")
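The same neuron realises a NOT gate with one inhibitory (negative) weight; a small sketch using a hypothetical NOT_GATE definition (not part of the original notes):
NOT_GATE = {"weights": [-1], "threshold": 0}
print("\nNOT Gate:")
for x in [(0,), (1,)]:
    output = mcculloch_pitts(x, NOT_GATE["weights"], NOT_GATE["threshold"])
    print(f"Input: {x}, Output: {output}")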
Single layer perceptron –
import numpy as np
class Perceptron:
    # OR-gate weights and bias are assumed; the original initialisation is missing from the notes
    def __init__(self, weights=(1, 1), bias=-0.5):
        self.weights = np.array(weights)
        self.bias = bias
    def activation(self, x):
        return 1 if x >= 0 else 0  # step activation
    # Forward pass: calculate the weighted sum and apply the activation function
    def forward(self, inputs):
        weighted_sum = np.dot(inputs, self.weights) + self.bias
        return self.activation(weighted_sum)

or_perceptron = Perceptron()
inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
print("\nTesting OR Gate:")
for x in inputs:
    print(f"Input: {x}, Output: {or_perceptron.forward(x)}")
Multi-layer Perceptron (Backpropagation) –
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    # derivative expressed in terms of the sigmoid output
    return x * (1 - x)

# Training parameters
epochs = 10000
learning_rate = 0.1

# Training loop
for epoch in range(epochs):
    # ---- Forward Pass ----
    hidden_input = np.dot(X, W1) + b1        # Net input to hidden layer
    hidden_output = sigmoid(hidden_input)    # Activation of hidden layer
    # (the output-layer and backward-pass code is missing here; a complete sketch follows below)
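For reference, a complete minimal version of this training loop on the XOR problem; the dataset, layer sizes, and weight initialisation are assumptions, since they are missing from the fragment above:
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

# XOR dataset (assumed)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

np.random.seed(0)
W1 = np.random.randn(2, 4)   # input -> hidden weights (4 hidden units assumed)
b1 = np.zeros((1, 4))
W2 = np.random.randn(4, 1)   # hidden -> output weights
b2 = np.zeros((1, 1))

epochs = 10000
learning_rate = 0.1

for epoch in range(epochs):
    # ---- Forward Pass ----
    hidden_input = np.dot(X, W1) + b1
    hidden_output = sigmoid(hidden_input)
    final_input = np.dot(hidden_output, W2) + b2
    final_output = sigmoid(final_input)

    # ---- Backward Pass (gradients of the squared error) ----
    error = y - final_output
    d_output = error * sigmoid_derivative(final_output)
    d_hidden = np.dot(d_output, W2.T) * sigmoid_derivative(hidden_output)

    # ---- Weight updates ----
    W2 += learning_rate * np.dot(hidden_output.T, d_output)
    b2 += learning_rate * d_output.sum(axis=0, keepdims=True)
    W1 += learning_rate * np.dot(X.T, d_hidden)
    b1 += learning_rate * d_hidden.sum(axis=0, keepdims=True)

print("Predictions after training:", final_output.ravel())
Exact outputs depend on the random initialisation; more hidden units or epochs may be needed for a clean XOR fit.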
Principal Component Analysis (PCA) –
import numpy as np

# Sample dataset (2D)
X = np.array([[2.5, 2.4],
              [0.5, 0.7],
              [2.2, 2.9],
              [1.9, 2.2],
              [3.1, 3.0],
              [2.3, 2.7],
              [2.0, 1.6],
              [1.0, 1.1],
              [1.5, 1.6],
              [1.1, 0.9]])

# Step 1: Mean Centering
mean_X = np.mean(X, axis=0)
X_centered = X - mean_X

# Step 2: Compute Covariance Matrix
cov_matrix = np.cov(X_centered.T)

# Step 3: Compute Eigenvalues and Eigenvectors
eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)

# Step 4: Sort Eigenvalues and Select Principal Component
sorted_indices = np.argsort(eigenvalues)[::-1]               # Sort in descending order
principal_component = eigenvectors[:, sorted_indices[0]]     # First eigenvector
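The centered data can then be projected onto the selected component to get a 1-D representation; a minimal sketch, assuming the variables computed above:
X_pca = X_centered @ principal_component                 # projection onto the first PC
explained_ratio = eigenvalues[sorted_indices[0]] / eigenvalues.sum()
print("Projected data:", X_pca)
print("Explained variance ratio:", explained_ratio)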