from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
import numpy as np
import time
# Generate a synthetic single-feature regression dataset with Gaussian noise.
x, y = make_regression(n_samples=100, n_features=1, n_informative=1,
                       noise=10, random_state=42)
plt.scatter(x, y, c='red')
plt.xlabel('Feature')
plt.ylabel('Target_Variable')
plt.title('Training Data')
plt.show()
# Reshape the target into a column vector so it broadcasts against x, which
# make_regression returns with shape (m, 1).
y = y.reshape(100, 1)
num_iter = 1000                        # number of gradient descent iterations
alpha = 0.01                           # learning rate
m = len(x)                             # number of training examples
theta = np.zeros((2, 1), dtype=float)  # theta[0]: intercept, theta[1]: slope

t0 = t1 = 0
Grad0 = Grad1 = 0

start_time = time.time()
for i in range(num_iter):
    # Accumulate the gradient of the squared-error cost w.r.t. the intercept.
    for j in range(m):
        Grad0 = Grad0 + (theta[0] + theta[1] * x[j]) - (y[j])
    # Accumulate the gradient w.r.t. the slope.
    for k in range(m):
        Grad1 = Grad1 + ((theta[0] + theta[1] * x[k]) - (y[k])) * x[k]
    # Simultaneous update: compute both new values before overwriting theta.
    t0 = theta[0] - (alpha * (1 / m) * Grad0)
    t1 = theta[1] - (alpha * (1 / m) * Grad1)
    theta[0] = t0
    theta[1] = t1
    # Reset the accumulators for the next iteration.
    Grad0 = Grad1 = 0
print('model parameters:', theta, sep='\n')
print('Time Taken For Gradient Descent in Sec:', time.time() - start_time)
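
# For reference, the two accumulation loops above can be collapsed into a
# single vectorized batch update. This is a minimal sketch of the same rule,
# run with its own parameter vector (X_b and theta_vec are names introduced
# here, not from the original) so the loop-based theta stays untouched.
X_b = np.hstack([np.ones((m, 1)), x])  # prepend a bias column for the intercept
theta_vec = np.zeros((2, 1))
start_vec = time.time()
for _ in range(num_iter):
    grad = (1 / m) * X_b.T @ (X_b @ theta_vec - y)  # both partials at once
    theta_vec = theta_vec - alpha * grad
print('vectorized parameters:', theta_vec, sep='\n')
print('Time Taken For Vectorized Descent in Sec:', time.time() - start_vec)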
# Plot the fitted line h = theta[0] + theta[1] * x over the training data.
h = []
for i in range(m):
    h.append(theta[0] + theta[1] * x[i])

plt.plot(x, h)
plt.scatter(x, y, c='red')
plt.xlabel('Feature')
plt.ylabel('Target_Variable')
plt.title('Output')
plt.show()
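
# As a sanity check (an addition, not part of the original walkthrough), the
# learned parameters can be compared against scikit-learn's closed-form
# least-squares fit; the intercept and slope should roughly match theta[0]
# and theta[1] once gradient descent has converged.
from sklearn.linear_model import LinearRegression

lr = LinearRegression().fit(x, y)
print('sklearn intercept and slope:', lr.intercept_, lr.coef_)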