
Writing the batch backpropagation learning algorithm function

F_bp_batch.m

Comment part:
A line that begins with % (doubled here as %%) is a comment. Once you type
help F_bp_batch, MATLAB displays these leading comment lines on the
screen, one after another, until the first blank line.
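For instance, a hypothetical file myfun.m (made up purely for illustration, not part of this chapter) would behave as follows:

function y=myfun(x)
% MYFUN Square each element of x.
% y = myfun(x) returns x.^2.
% The blank line below ends the help text.

y=x.^2;

Typing help myfun prints only the three comment lines. F_bp_batch.m doubles the percent signs (%%) as a matter of style; as the help transcript later in this section shows, help strips just the first % of each line.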
%% function [A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,...
%% HLneurons,HL_TF,LearnRate,goal,Epochs,minLR,biasON,...
%% mu,plotPI)
%% Batch Error Back Propagation Algorithm
%% Calling and Returning examples
%% [A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,...
%% HL_TF,LearnRate,goal);
%% for 1-Hidden-layer only NN
%%
%% FILENAME: F_bp_batch.m
%% A function to do Batch Error BackPropagation learning
%% -------------------------------------------
%% P : Input patterns
%% T : Target patterns
%% HLneurons: [n1] for n1 neurons in Hidden layer
%% n1 must be an integer
%% HL_TF : for {'tansig', 'purelin'}, use [2]
%% for {'logsig', 'purelin'}, use [1]
%% Only tansig, logsig allowed in Hidden layer.
%% TF for output layer is limited to purelin only.
%% LearnRate : Learning factor (0--1), real
%% goal : Target MSE; the smaller the better.
%% help F_bp_batch
%% ===================================
%% Example: 2-4-3 NN (three logic outputs)
%% Define I/O. XOR, AND, and OR gates
%clear;P=[0 0 1 1;0 1 0 1];
%T=[0 1 1 0;0 0 0 1;0 1 1 1];
%HLneurons=4;HL_TF=1;LearnRate=[];goal=1e-7;
%[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
% LearnRate,goal);
%[T;A]
%% ===================================
%% Example: 2-2-1 NN. See Kumar, NN, 2013
%% Define I/O. XOR gate
%clear;
%pattern=[0.1 0.1 0.95 0.95;0.1 0.95 0.1 0.95;0.1 ...
% 0.95 0.95 0.1];
%P=pattern([1 2],:);T=pattern(3,:);
%HLneurons=2;HL_TF=1;LearnRate=[];goal=1e-7;
%[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
% LearnRate,goal);
%[T;A]
%% ================================
%% [Calling example]
%clear;P=[0 0 1 1;0.5 1 2 10];
%T=[0.4 2.6 1.5 3.3;0 1 1 2];
%HLneurons=4;HL_TF=2;LearnRate=[];goal=1e-7;
%[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
% LearnRate,goal);
%disp('Upper half:Target, Lower:NN output');
%[T;A]
%% ================================
%% [Calling example]
%clear;P=[0 0 1 1;0.5 10 20 100];
%[S,R]=size(P);
%T=[0.4 2.6 1.5 3.3;0 1 1 2];
%HLneurons=4;HL_TF=2;LearnRate=0.001;goal=1e-7;
%[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
% LearnRate,goal);
%disp('Upper half:Target, Lower:NN output');
%[T;A]
% % Initial LR=0.001000;Final MSE=9.98949e-008;
% % >>>bestEpoch=371867, momentum const=0.700000;
% % bestMSE=9.98949e-008;
% % >>>TF of Hidden-layer is (tansig2)
% % Upper half:Target, Lower:NN output
% % 0.4000 2.6000 1.5000 3.3000
% % 0 1.0000 1.0000 2.0000
% % 0.4001 2.5997 1.5001 3.3002
% % -0.0003 1.0006 0.9997 1.9998
%% help F_bp_batch

%% Written by PenChen Chou, 2002-7-25.


%% Revised on 2013-11-8, 2015-09-22, and 2016-9-5.

function [A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,...
HL_TF,LearnRate,goal,Epochs,minLR,biasON,mu,plotPI)

SetRandSeed;
% Set defaults
[dimP,Samples]=size(P); [dimT,V]=size(T);
if Samples~=V,
error('Error! I/O sequence is not matched');
end
if nargin<=10 || isempty(plotPI),
plotPI=0;
end
if nargin<=9 || isempty(mu),
mu=0.7;
end
if nargin<=8 || isempty(biasON),
biasON=1;
end
if nargin<=7 || isempty(minLR),
minLR=1e-5;
end
if nargin<=6 || isempty(Epochs),
Epochs=1e6;
end
if nargin<=5 || isempty(goal),
goal=1e-6;
end
if nargin<=4 || isempty(LearnRate),
LearnRate=0.05;
end
LR1=LearnRate;
if nargin<=3 || isempty(HL_TF),
HL_TF=2; % tansig in HL, =1 means logsig
end
if nargin<=2 || isempty(HLneurons),
HLneurons=2; %single HL-layer with 2 neurons
end
%
% Weight and bias updates using EBP learning
if plotPI==1,
iters=zeros(1,Epochs);SSEs=iters;minSSEs=iters; % one slot per epoch
end
minMSE=1e20;preMSE=minMSE+1;
W1=0.1*randn(HLneurons,dimP);b1=biasON*0.1*randn(HLneurons,1);
W2=0.1*randn(dimT,HLneurons);b2=biasON*0.1*randn(dimT,1);
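% Small random initial weights (scaled by 0.1). With biasON=0 the
% biases start at zero and, since every bias update below is
% multiplied by biasON, they stay at zero throughout training.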
dW1_old=0*W1;db1_old=0*b1;
dW2_old=0*W2;db2_old=0*b2;
dF2=zeros(size(T));Delta2=dF2;
dF1=zeros(HLneurons,Samples);Delta1=dF1;
sumx=zeros(1,Samples);counts=0;nextTurnOn=500;
if HLneurons>10, nextTurnOn=5; end;
if plotPI==0,
figure(168);clf;
end;
for epoch=1:Epochs,
%% Forward pass: compute network output A and error err
n1=W1*P+b1*ones(1,Samples);
if HL_TF==2
a1=tansig2(n1);
elseif HL_TF==1
a1=logsig2(n1);
else
a1=n1;
end
A=W2*a1+b2*ones(1,Samples);
err=T-A;
% The last TF must be 'purelin'
for sample=1:Samples
for k=1:dimT
dF2(k,sample)=1;
Delta2(k,sample)=dF2(k,sample)*err(k,sample);
end
end
for sample=1:Samples
for j=1:size(a1,1)
sumx(sample)=0;
for k=1:size(A,1)
sumx(sample)=sumx(sample)+...
Delta2(k,sample)*W2(k,j);
end
if HL_TF==2,
dF1(j,sample)=dtansig2(n1(j,sample));
elseif HL_TF==1
dF1(j,sample)=dlogsig2(n1(j,sample));
else
dF1(j,sample)=1;
end
Delta1(j,sample)=dF1(j,sample)*sumx(sample);
end
end
% Batch updates over all patterns, with momentum term mu
dW2=LearnRate*Delta2*a1'+mu*dW2_old;
W2=W2+dW2;
db2=LearnRate*sum(Delta2,2)+mu*db2_old;
b2=biasON*(b2+db2);
dW1=LearnRate*Delta1*P'+mu*dW1_old;
W1=W1+dW1;
db1=LearnRate*sum(Delta1,2)+mu*db1_old;
b1=biasON*(b1+db1);
MSE=mse(err);
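% Adaptive learning rate with best-weight snapshot: on improvement,
% save the weights and raise LearnRate slightly (capped at 0.1);
% otherwise decay it by 0.1%, but never below minLR.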
if minMSE>MSE
minMSE=MSE;FW1=W1;FW2=W2;Fb1=b1;Fb2=b2;
bestEpoch=epoch;
LearnRate=min([0.1,LearnRate/0.9999]);
else
LearnRate=max([minLR,LearnRate*0.999]);
end
if plotPI==1,
if mod(epoch,100)==0 || epoch<100
minSSEs(epoch)=minMSE;iters(epoch)=epoch;
SSEs(epoch)=MSE;
end
end;
if epoch<300 || mod(epoch,nextTurnOn)==0,
fprintf('epoch=%4d,LR=%.6f;minMSE=%g;\n',...
epoch,LearnRate,minMSE);
if preMSE==minMSE,
counts=counts+1;
else
preMSE=minMSE;
end
if plotPI==0,
figure(168);
semilogx(epoch,20*log10(MSE),'.r');
hold on;
semilogx(epoch,20*log10(minMSE),'.');
end
end;
if minMSE<=goal, break; end;
if MSE>1000,
disp('Warning: too large MSE! Aborted!');
break;
end;
if counts>600, break; end;
% Save current increments for the next epoch's momentum term
dW1_old=dW1;dW2_old=dW2;db1_old=db1;db2_old=db2;
end
% Restore the best weights and recompute the final output
W1=FW1;W2=FW2;
b1=Fb1;b2=Fb2;
epoch=min([bestEpoch,Epochs]);
n1=W1*P+b1*ones(1,Samples);
if HL_TF==2
a1=tansig2(n1);
elseif HL_TF==1
a1=logsig2(n1);
else
a1=n1;
end
A=W2*a1+b2*ones(1,Samples);
err=T-A;
MSE=mse(err);
if minMSE>MSE, minMSE=MSE; end;
fprintf('Initial LR=%.6f;Final MSE=%g;\n',...
LR1,MSE);
fprintf('>>>bestEpoch=%4d, momentum const=%f;bestMSE=%g;\n',...
epoch,mu,minMSE);
if HL_TF==1,
fprintf('>>>TF of Hidden-layer is (logsig2)\n');
elseif HL_TF==2,
fprintf('>>>TF of Hidden-layer is (tansig2)\n');
else
fprintf('>>>TF of Hidden-layer is (purelin)\n');
end;
%% Remove unused (zero-valued) log entries
if plotPI==1,
II=find(iters==0);
iters(II)=[];SSEs(II)=[];minSSEs(II)=[];
%% Plot PI chart
X1=20*log10(SSEs);X2=20*log10(minSSEs);
figure(1108);semilogx(iters,X1,iters,X2,'r.');
title('PI plot in log format. minMSE(red),BETTER(<-80 dB)');
xlabel(['MSE=' num2str(minMSE) ': Generations']);grid on;
else
figure(168);
semilogx(epoch,20*log10(MSE),'.r');
semilogx(epoch,20*log10(minMSE),'.');
hold off;
title('PI plot in dB. minMSE(blue),MSE(red),BETTER(<-80 dB)');
xlabel(['minMSE=' num2str(minMSE) ': Generations']);grid on;
ylabel('dB: in F-bp-batch.m function file');
end;
fprintf('help F_bp_batch\n');
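A side note on the gradient loops: because the output layer is purelin, dF2 is identically 1 and Delta2 is simply err, and the innermost loop that accumulates sumx is computing the matrix product W2'*Delta2. The deltas can therefore be obtained in vectorized form. The sketch below is not part of F_bp_batch.m and assumes dtansig2 and dlogsig2 accept matrix input, but it is mathematically equivalent to the loops above:

% Output layer (purelin): derivative is 1, so Delta2 reduces to err
Delta2=err; % dimT-by-Samples
% Hidden layer: back-propagate through W2, then apply the TF derivative
if HL_TF==2
dF1=dtansig2(n1);
elseif HL_TF==1
dF1=dlogsig2(n1);
else
dF1=ones(size(n1));
end
Delta1=dF1.*(W2'*Delta2); % HLneurons-by-Samples

Entry (j,sample) of W2'*Delta2 equals the sum over k of Delta2(k,sample)*W2(k,j), which is exactly what the loop version stores in sumx(sample).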

>> help F_bp_batch


% function [A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,...
% HLneurons,HL_TF,LearnRate,goal,Epochs,minLR,biasON,...
% mu,plotPI)
% Batch Error Back Propagation Algorithm
% Calling and Returning examples
% [A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,...
% HL_TF,LearnRate,goal);
% for 1-Hidden-layer only NN
%
% FILENAME: F_bp_batch.m
% A function to do Batch Error BackPropagation learning
% -------------------------------------------
% P : Input patterns
% T : Target patterns
% HLneurons: [n1] for n1 neurons in Hidden layer
% n1 must be an integer
% HL_TF : for {'tansig', 'purelin'}, use [2]
% for {'logsig', 'purelin'}, use [1]
% Only tansig, logsig allowed in Hidden layer.
% TF for output layer is limited to purelin only.
% LearnRate : Learning factor (0--1), real
% goal : Target MSE; the smaller the better.
% help F_bp_batch
% ===================================
% Example: 2-4-3 NN (three logic outputs)
% Define I/O. XOR, AND, and OR gates
clear;P=[0 0 1 1;0 1 0 1];
T=[0 1 1 0;0 0 0 1;0 1 1 1];
HLneurons=4;HL_TF=1;LearnRate=[];goal=1e-7;
[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
LearnRate,goal);
[T;A]
% ===================================
% Example: 2-2-1 NN. See Kumar, NN, 2013
% Define I/O. XOR gate
clear;
pattern=[0.1 0.1 0.95 0.95;0.1 0.95 0.1 0.95;0.1 ...
0.95 0.95 0.1];
P=pattern([1 2],:);T=pattern(3,:);
HLneurons=2;HL_TF=1;LearnRate=[];goal=1e-7;
[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
LearnRate,goal);
[T;A]
% ================================
% [Calling example]
clear;P=[0 0 1 1;0.5 1 2 10];
T=[0.4 2.6 1.5 3.3;0 1 1 2];
HLneurons=4;HL_TF=2;LearnRate=[];goal=1e-7;
[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
LearnRate,goal);
disp('Upper half:Target, Lower:NN output');
[T;A]
% ================================
% [Calling example]
clear;P=[0 0 1 1;0.5 10 20 100];
[S,R]=size(P);
T=[0.4 2.6 1.5 3.3;0 1 1 2];
HLneurons=4;HL_TF=2;LearnRate=0.001;goal=1e-7;
[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
LearnRate,goal);
disp('Upper half:Target, Lower:NN output');
[T;A]
% Initial LR=0.001000;Final MSE=9.98949e-008;
% >>>bestEpoch=371867, momentum const=0.700000;
% bestMSE=9.98949e-008;
% >>>TF of Hidden-layer is (tansig2)
% Upper half:Target, Lower:NN output
% 0.4000 2.6000 1.5000 3.3000
% 0 1.0000 1.0000 2.0000
% 0.4001 2.5997 1.5001 3.3002
% -0.0003 1.0006 0.9997 1.9998
% help F_bp_batch

%[Calling example]
clear;P=[0 0 1 1;0.5 10 20 100];
[S,R]=size(P);
T=[0.4 2.6 1.5 3.3;0 1 1 2];
HLneurons=4;HL_TF=2;LearnRate=0.001;goal=1e-7;
[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
LearnRate,goal);
disp('Upper half:Target, Lower:NN output');
[T;A]
>> P

P =

0 0 1.0000 1.0000
0.5000 10.0000 20.0000 100.0000

>> T

T =

0.4000 2.6000 1.5000 3.3000
0 1.0000 1.0000 2.0000

epoch=40500,LR=0.000147;minMSE=1.12894e-007;
Initial LR=0.001000;Final MSE=9.98855e-008;
>>>bestEpoch=40809, momentum const=0.700000;bestMSE=9.98855e-008;
>>>TF of Hidden-layer is (tansig2)
>> [T;A]

ans =
0.4000 2.6000 1.5000 3.3000
0 1.0000 1.0000 2.0000
0.4002 2.5997 1.5002 3.3002
-0.0003 1.0006 0.9997 1.9999

[Figure 168: "PI plot in dB. minMSE(blue),MSE(red),BETTER(<-80 dB)". y-axis: "dB: in F-bp-batch.m function file", 20 down to -160; x-axis: "minMSE=9.9885e-008: Generations", log scale from 10^0 to 10^5.]

help F_bp_batch
epoch=335000,LR=0.000137;minMSE=1.02007e-007;
Initial LR=0.001000;Final MSE=9.99218e-008;
>>>bestEpoch=335064, momentum const=0.700000;bestMSE=9.99218e-008;
>>>TF of Hidden-layer is (tansig2)
>> A

A =

0.4001 2.5997 1.5002 3.3001
-0.0003 1.0006 0.9997 1.9998
[Figure 168: "PI plot in dB. minMSE(blue),MSE(red),BETTER(<-80 dB)". y-axis: "dB: in F-bp-batch.m function file", 20 down to -160; x-axis: "minMSE=9.9922e-008: Generations", log scale from 10^0 to 10^6.]
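The optional trailing arguments may also be given explicitly. Below is a sketch of such a call; the values are illustrative (they echo the defaults documented in the listing), and plotPI=1 switches the output to the figure-1108 PI chart:

clear;P=[0 0 1 1;0 1 0 1];T=[0 1 1 0]; % XOR gate
HLneurons=4;HL_TF=2;LearnRate=0.05;goal=1e-7;
Epochs=2e5;minLR=1e-5;biasON=1;mu=0.7;plotPI=1;
[A,err,MSE,W1,b1,W2,b2]=F_bp_batch(P,T,HLneurons,HL_TF,...
LearnRate,goal,Epochs,minLR,biasON,mu,plotPI);
[T;A]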

