202201154_Lab03
Question 1
clc;
clear;
close all;
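% --- Assumed problem setup (not shown in this excerpt; illustrative values only) ---
Q = [4 1; 1 3];                      % symmetric positive definite matrix (assumption)
b = [1; 2];                          % linear term (assumption)
x_opt = Q \ b;                       % minimizer of f(x) = 0.5*x'*Q*x - b'*x
x = [0; 0];                          % initial iterate (assumption)
alpha_gd = 0.1;                      % fixed step size for gradient descent (assumption)
tol = 1e-6;                          % stopping tolerance on the gradient norm (assumption)
max_iter = 1000;                     % maximum number of iterations (assumption)
iter_gd = 0;                         % iteration counter
error_vals_gd = zeros(max_iter, 1); % squared error norm per iteration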
% Gradient of the quadratic objective f(x) = 0.5*x'*Q*x - b'*x
grad_f = @(x) Q * x - b;
for k = 1:max_iter
    grad = grad_f(x);
    % Stop if the norm of the gradient is below the tolerance
    if norm(grad) < tol
        break;
    end
    % Gradient descent update with fixed step size
    x = x - alpha_gd * grad;
    % Store error norm (squared for better visualization)
    error_vals_gd(k) = norm(x - x_opt, 2)^2;
    iter_gd = iter_gd + 1; % Count iterations
end
iter_gd

iter_gd =
    52
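Before the steepest descent run, the iterate and counters presumably get re-initialized; a minimal sketch under the same assumed setup as above:

x = [0; 0];                          % restart from the same initial point (assumption)
iter_sd = 0;                         % iteration counter
error_vals_sd = zeros(max_iter, 1); % squared error norm per iteration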
for k = 1:max_iter
    grad = grad_f(x);
    % Compute optimal step size: alpha = (grad' * grad) / (grad' * Q * grad)
    alpha_sd = (grad' * grad) / (grad' * (Q * grad));
    % Steepest descent update
    x = x - alpha_sd * grad;
    % Store error norm (squared)
    error_vals_sd(k) = norm(x - x_opt, 2)^2;
    iter_sd = iter_sd + 1; % Count iterations
    % Stop if the gradient norm is below the tolerance
    % (note: this checks the pre-update gradient, so the final update is always taken)
    if norm(grad) < tol
        break;
    end
end
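The step size used above is the exact line search along the negative gradient: for the quadratic f(x) = 0.5*x'*Q*x - b'*x with gradient g_k = Q*x_k - b, minimizing f(x_k - alpha*g_k) over alpha has a closed form,

\[
\frac{d}{d\alpha} f(x_k - \alpha g_k)
= -g_k^\top \big( Q(x_k - \alpha g_k) - b \big)
= -g_k^\top g_k + \alpha\, g_k^\top Q g_k = 0
\quad\Rightarrow\quad
\alpha_k = \frac{g_k^\top g_k}{g_k^\top Q g_k},
\]

which is exactly the expression computed for alpha_sd in the loop.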
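The stored squared error norms are presumably plotted to compare the two methods; a minimal plotting sketch, assuming the vectors are trimmed to the iterations actually performed:

% --- Convergence comparison (illustrative sketch) ---
semilogy(1:iter_gd, error_vals_gd(1:iter_gd), 'b-', ...
         1:iter_sd, error_vals_sd(1:iter_sd), 'r--');
xlabel('Iteration k');
ylabel('||x_k - x^*||_2^2');
legend('Gradient Descent (fixed step)', 'Steepest Descent (exact line search)');
grid on;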