This appendix contains the source code of a neural-network program written in MATLAB. The code (1) reads the training and target data, randomly initializes the network weights and biases, and normalizes the data; (2) runs a training loop that computes outputs, errors, and weight and bias adjustments over epochs using backpropagation; and (3) saves the trained weights and plots the learning curve. A second script loads the trained weights to make predictions on new data and plot the results.


APPENDIX A.

SOURCE CODE PROGRAM

clc
clear all
close all

disp('7 20 20 2');
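% (presumably the architecture used: 7 inputs, two hidden layers of 20 nodes, 2 outputs)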
% Variables used:
% a(ar,ac),b(br,bc),B,c,d,D,del,delb,delw,der,e,E,i,j,k,l,N,no,O,r,resp,V,W,Y,Yin;
E=10;   % running epoch error; start above Emax so the training loop runs
d=0;    % epoch counter

%Make Training Data


%getdata();
a=dlmread('data1in');
b=dlmread('data1out');
[ar,ac]=size(a);
[br,bc]=size(b);
if ac~=bc
error('number of input training vectors does not equal number of output training vectors');
end
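% data1in/data1out are whitespace-delimited text matrices (dlmread); each
% column is one sample: rows of a are input features, rows of b are targets.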

%Take inputs: number of hidden layers, maximum expected error Emax


%Initial Weights & Biases
%================================
l=input('Enter no. of hidden layers ');
l=l+2;   % total layers = hidden layers + input layer + output layer

no(1)=ar;    % input layer width = number of input features
for i=2:l-1
no(i)=input(sprintf('Enter no of nodes in hidden layer %d: ',i-1));
end
no(l)=br;    % output layer width = number of target components
Emax=input('Enter max expected error Emax ');


[W,B,delw]=initialise(l,no);
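% W(i,j,k) is the weight from node i in layer k to node j in layer k+1;
% delw stores the previous weight changes for the momentum term.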
%load weightmatrix W B delw;

%Normalize Data
%================================
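% Z-score each row to zero mean and unit standard deviation; the statistics
% (m1,v1,m2,v2) are saved later for reuse by the prediction script.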
for i=1:no(1)
m1(i)=mean(a(i,:));
v1(i)=std(a(i,:));
a(i,:)=(a(i,:)-m1(i))/v1(i);
end;

for i=1:no(l)
m2(i)=mean(b(i,:));
v2(i)=std(b(i,:));
b(i,:)=(b(i,:)-m2(i))/v2(i);
end;

%Generate Random Test Vector of length ac2


%================================
ac2=ceil(ac);              % number of training vectors used (here: all of them)
r=randi([1,ac],1,ac2);     % random indices with replacement (randint is obsolete)
%load matrices r;

for i=1:ac2
ra(:,i)=a(:,r(i));
rb(:,i)=b(:,r(i));
end
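% ra/rb hold the training inputs/targets in randomized column order.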
% pause();

figure('BackingStore','off');
hlr = uicontrol('Style','slider','value',.04,'Min',0,'Max',0.2,'SliderStep',[0.01 0.1],'Position',[75 7 150 20]);
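% The slider lets the user adjust the learning rate N (0 to 0.2) while training runs.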


N = get(hlr,'value');
edr=uicontrol('Style','text','FontSize', 12,'string', num2str(N),'Position',[230 7 50 20]);
tic;
%Training Loop
%================================
while E>Emax
N = get(hlr,'value');
set(edr,'string',num2str(N));
E=0;
d=d+1;
r=randi([1,ac2],1,ac2);    % reshuffle the presentation order each epoch
for c=1:ac2

%Enter training vectors randomly...


%Input layer: activations are the raw inputs; identity derivative is 1
for i=1:no(1)
Y(i,1)=ra(i,r(c));
V(i,1)=ra(i,r(c));
der(i,1)=1;
end

for i=1:no(l)
D(i)=rb(i,r(c));
end

%Forward pass through the hidden layers: V = Y'*W (net input), Y = logsig(V - B)
for k=1:l-2
for j=1:no(k+1)
w=W(1:no(k),j,k);
y=Y(1:no(k),k);
V(j,k+1)=y'*w;
Y(j,k+1)=logsig(V(j,k+1)-B(j,k+1));
der(j,k+1)=dlogsig(V(j,k+1)-B(j,k+1),Y(j,k+1));
end
end


%Output layer: linear (purelin) activation
k=l-1;
for j=1:no(l)
w=W(1:no(k),j,k);
y=Y(1:no(k),k);
V(j,l)=y'*w;
Y(j,l)=purelin(V(j,l)-B(j,l));
der(j,l)=dpurelin(V(j,l)-B(j,l));
end
%Computation of Error
for i=1:no(l)
e(i)=D(i)-Y(i,l);
E=E+0.5*(e(i)*e(i));
del(i,l)=der(i,l)*e(i);
end
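% E accumulates 0.5*e^2 over all outputs and samples: the epoch's sum of squared errors.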

%Backpropagate deltas: del(i,k) = der(i,k) * sum_j W(i,j,k)*del(j,k+1)
for k=l-1:-1:1
for i=1:no(k)
w=W(i,1:no(k+1),k);
dd=del(1:no(k+1),k+1);
de=w*dd;
del(i,k)=de*der(i,k);
end
end
%Adjust Weights & Bias
for k=l-1:-1:1
for i=1:no(k)
for j=1:no(k+1)
delw(i,j,k)=N*del(j,k+1)*Y(i,k)+0.4*delw(i,j,k);   % gradient step plus momentum (coefficient 0.4)
W(i,j,k)=W(i,j,k)+delw(i,j,k);
end
end
end

%Bias update: B = B - N*del (minus sign because activations use V - B)
for k=2:l


for i=1:no(k)
delb(i,k)=N*(-1)*del(i,k);
B(i,k)=B(i,k)+delb(i,k);
end
end
end
err(d)=E;
if rem(d,100)==0
%plot after every 100 epochs
plot(1:d,err,'b-',1:d,Emax*ones(1,d),'g-');
save weightmatrix W B delw;
pause(0.05);

end;
end
%End of Training Loop
%================================
toc;
save norms m1 v1 m2 v2 no l;
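% Normalization statistics and layer sizes are saved for the prediction script.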
plot(err),title('Learning Curve'),xlabel('No. of Epochs -----> ');
ylabel('Sum of Squared Error -----> ');
hold on;
plot(0:d,Emax*ones(1,d+1),'g-');
grid on;

%End of Training Script
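%================================
% initialise: random initial weights in (-0.5,0.5), biases in (0,1), zero
% momentum terms. In practice this function is saved as its own file, initialise.m.
%================================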

function [W,B,delw]=initialise(l,no)

for k=1:l-1
for i=1:no(k)
for j=1:no(k+1)
W(i,j,k)=rand-0.5;
delw(i,j,k)=0;


end;
end;
end;
disp('Initial weight matrix W = ');
disp(W);

for k=2:l
for i=1:no(k)
B(i,k)=rand;
delb(i,k)=0;
end
end
%disp('Initial bias matrix is')
%disp(B);
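%================================
% Prediction Script (separate program): load the trained weights and the
% normalization statistics, run a forward pass on new data, and plot results.
%================================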

clear all;
clc

a=dlmread('plot1inv');
b=dlmread('plot1out');
ain=dlmread('plot1in');
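% a: inputs for prediction; b: reference outputs; ain: input copy used for the plot x-axis.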
[ar,ac]=size(a);

load weightmatrix W B;
load norms m1 v1 m2 v2 no l;   % names must match those saved by the training script
f=3;   % index of the input feature used for the plot x-axis

for i=1:no(1)
a(i,:)=(a(i,:)-m1(i))/v1(i);      % apply the training-set normalization
ain(i,:)=(ain(i,:)-m1(i))/v1(i);
end;

for c=1:ac


for i=1:no(1)
Y(i,1)=a(i,c);
V(i,1)=a(i,c);
end

%Forward pass with the trained (fixed) weights
for k=1:l-2
for j=1:no(k+1)
w=W(1:no(k),j,k);
y=Y(1:no(k),k);
V(j,k+1)=y'*w;
Y(j,k+1)=logsig(V(j,k+1)-B(j,k+1));
end
end

k=l-1;
for j=1:no(l)
w=W(1:no(k),j,k);
y=Y(1:no(k),k);
V(j,l)=y'*w;
Y(j,l)=purelin(V(j,l)-B(j,l));
O(j,c)=Y(j,l)*v2(j)+m2(j);   % undo target normalization (assumes plot1out holds raw targets)
end
end

figure;
plot(ain(f,:),b(1,:),'-*g',a(f,:),O(1,:),'b-');
xlabel('Normalized Stub Length'),ylabel('Real S(1,1)');
legend('Actual Plot','Obtained Curve');
figure;
plot(ain(f,:),b(2,:),'-*g',a(f,:),O(2,:),'b-');
xlabel('Normalized Stub Length'),ylabel('Imag S(1,1)');
legend('Actual Plot','Obtained Curve');
