% (Page 1 of 2 — document-viewer artifact from the original source)

% ******* ANN back-propagation training program *******

% Trains a 2-6-1 feed-forward network with tansig activations using plain
% back-propagation (Zurada-style delta rule). The desired output D equals
% the second input bit, so the net learns d = z(2). Trained weights are
% saved to backp.mat once the summed squared error of an epoch drops
% below emax, or training stops after 5000 epochs.
clc;
clear;                          % 'clear' suffices; 'clear all' also wipes breakpoints/caches

n_input  = 2;                   % input layer size   (renamed: 'input' shadows a builtin)
n_output = 1;                   % output layer size  (renamed: 'output' shadows a builtin)
n_hidden = 6;                   % hidden layer size

% Training patterns (one per row) and their desired outputs.
Z = [0 0;
     0 1;
     1 0;
     1 1];
D = [0; 1; 0; 1];               % NOTE: target is simply the 2nd input bit, not XOR

eta  = 0.5;                     % learning rate (was misspelled 'eata')
emax = 0.000001;                % stop when the per-epoch error falls below this
e    = 0;                       % accumulated squared error over the current epoch

% Random initial weights, displayed once (no trailing semicolon on purpose).
W = randn(n_output, n_hidden)   % 1x6 hidden -> output weights
V = randn(n_hidden, n_input)    % 6x2 input  -> hidden weights

for main_loop = 1:5000
    for p = 1:4
        z = transpose(Z(p,:)); % 2x1 input column vector
        d = transpose(D(p,:)); % 1x1 desired output

        % Forward pass through hidden and output layers.
        y = tansig(V*z);       % 6x1 hidden activations
        o = tansig(W*y);       % 1x1 network output

        % Accumulate the squared error for this epoch.
        e = 0.5*norm(d-o)^2 + e;

        % Error signals for both layers. The 0.5 factor follows the
        % bipolar-sigmoid derivative used in the textbook; tansig's exact
        % derivative is (1-o.^2), so the 0.5 merely rescales the
        % effective learning rate. The expressions below are the
        % vectorized form of the original k/j loops (the loop version
        % also shadowed the builtin 'sum').
        delta_ok = 0.5 * (d - o) .* (1 - o.^2);          % output layer, 1x1
        delta_yj = 0.5 * (1 - y.^2) .* (W' * delta_ok);  % hidden layer, 6x1

        % Step 5: adjust weights of the output and hidden layers.
        W = W + eta * delta_ok * transpose(y);
        V = V + eta * delta_yj * transpose(z);
    end

    % One training epoch (all 4 patterns) completed.
    fprintf('error=%f no. of epoches =%d\n', e, main_loop);
    if e >= emax
        e = 0;                  % not converged: reset accumulator for next epoch
    else
        save backp.mat W V;     % converged: persist the trained weights
        break;
    end
end