Commit
现代算法 (Modern algorithms)
zhanwen committed Sep 16, 2020
1 parent 8904301 commit c08b511
Showing 72 changed files with 926 additions and 0 deletions.
Binary file added 现代算法/.DS_Store
121 changes: 121 additions & 0 deletions 现代算法/小波分析/xiaobofenxi.m
@@ -0,0 +1,121 @@
function main()
clc;clear all;close all;
% Use the Mexican hat (Mexihat) wavelet as the sample input and target output
x=0:0.03:3; % sample input values
c=2/(sqrt(3).*pi.^(1/4));
d=1/sqrt(2);
u=x/2-1;
targ=d.*c.*exp(-u.^2/2).*(1-u.^2); % sample target output values
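% Note: the target above is the Mexican hat wavelet
%   psi(u) = 2/(sqrt(3)*pi^(1/4)) * (1-u^2) * exp(-u^2/2),
% dilated by a=2 (hence the 1/sqrt(2) normalization) and shifted so that u = x/2-1.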
eta=0.02;aerfa=0.735; % initial learning rate and momentum factor
% Initialize the input-to-hidden weights whi and the hidden-to-output weights wjh.
% H is the number of wavelet (hidden) nodes, P the number of sample presentations,
% J the number of output nodes, and I the number of input nodes.
H=15;P=2;I=length(x);J=length(targ);
b=rand(H,1);a=rand(H,1); % initialize the wavelet translation (b) and dilation (a) parameters
whi=rand(I,H);wjh=rand(H,J); % initialize the weight matrices
b1=rand(H,1);b2=rand(J,1); % initialize the biases (thresholds)
p=0;
Err_NetOut=[]; % stores the network error at each learning step
flag=1;count=0;
while flag>0
flag=0;
count=count+1;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
xhp1=0;
for h=1:H
for i=1:I
xhp1=xhp1+whi(i,h)*x(i);
end
ixhp(h)=xhp1+b1(h);
xhp1=0;
end
for h=1:H
oxhp(h)=fai((ixhp(h)-b(h))/a(h));
end
ixjp1=0;
for j=1:J
for h=1:H
ixjp1=ixjp1+wjh(h,j)*oxhp(h);
end
ixjp(j)=ixjp1+b2(j);
ixjp1=0;
end
for i=1:J
oxjp(i)=fnn(ixjp(i));
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
wuchayy=1/2*sumsqr(oxjp-targ); % squared-error cost of this pass
Err_NetOut=[Err_NetOut wuchayy]; % record the error of every pass
% BP (backpropagation) step: compute the adjustment of each wavelet-network parameter
for j=1:J
detaj(j)=-(oxjp(j)-targ(j))*oxjp(j)*(1-oxjp(j));
end
for j=1:J
for h=1:H
detawjh(h,j)=eta*detaj(j)*oxhp(h);
end
end
detab2=eta*detaj;
acc=0; % accumulator (avoids shadowing the built-in sum)
for h=1:H
for j=1:J
acc=detaj(j)*wjh(h,j)*diffai((ixhp(h)-b(h))/a(h))/a(h)+acc;
end
detah(h)=acc;
acc=0;
end
for h=1:H
for i=1:I
detawhi(i,h)=eta*detah(h)*x(i);
end
end
detab1=eta*detah;
detab=-eta*detah;
for h=1:H
detaa(h)=-eta*detah(h)*((ixhp(h)-b(h))/a(h));
end
% The momentum factor aerfa speeds up convergence and helps avoid local minima.
wjh=wjh+(1+aerfa)*detawjh;
whi=whi+(1+aerfa)*detawhi;
a=a+(1+aerfa)*detaa';
b=b+(1+aerfa)*detab';
b1=b1+(1+aerfa)*detab1';
b2=b2+(1+aerfa)*detab2';
% Samples are processed one at a time (online learning) rather than in a batch.
p=p+1;
if p~=P
flag=flag+1;
else
if Err_NetOut(end)>0.008
flag=flag+1;
else
figure;
plot(Err_NetOut);
xlabel('Number of learning iterations');ylabel('Network output error');
title('Network learning error curve','fontsize',20,'color',[0 1 1]);
end
end
if count>6000
figure(1);
subplot(1,2,1)
plot(Err_NetOut,'color','b','linestyle','-','linewidth',2.2,...
'marker','^','markersize',3.5);
xlabel('Number of learning iterations');ylabel('Network output error');
title('Error curve','fontsize',20,'color',[1 1 1]);
subplot(1,2,2)
handle1=plot(x,targ,'color','r','linestyle','--','linewidth',2.2,...
'marker','p','markersize',3.5);
hold on
handle1=plot(x,oxjp,'color','g','linestyle','-.','linewidth',2.2,...
'marker','d','markersize',3.5);
xlabel('Sample input value');ylabel('Sample target value and network output');
title('Target values vs. network output','fontsize',20,'color',[1 1 1]);
legend('sample target values','network output');
break;
end
end
function y3=diffai(x) % subroutine: derivative of the wavelet activation fai
y3=-1.75*sin(1.75*x).*exp(-x.^2/2)-cos(1.75*x).*exp(-x.^2/2).*x;
function yl=fai(x) % subroutine: wavelet activation cos(1.75x)*exp(-x^2/2)
yl=cos(1.75.*x).*exp(-x.^2/2);
function y2=fnn(x) % subroutine: sigmoid output activation
y2=1/(1+exp(-x));
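A quick sanity check, added here as a sketch rather than part of the commit: the subroutine diffai should be the analytic derivative of the wavelet activation fai, and a central-difference comparison confirms it (the variable names faif, dfai, numd, xx below are illustrative, not from the original file).

% Sketch: numerically verify that diffai matches d/dx fai via central differences.
xx   = linspace(-4,4,201);
h    = 1e-6;
faif = @(x) cos(1.75.*x).*exp(-x.^2/2);
dfai = @(x) -1.75*sin(1.75*x).*exp(-x.^2/2) - cos(1.75*x).*exp(-x.^2/2).*x;
numd = (faif(xx+h) - faif(xx-h)) ./ (2*h);
max(abs(numd - dfai(xx)))   % expected to be on the order of 1e-9 or smaller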
127 changes: 127 additions & 0 deletions 现代算法/模拟退火法/monituihuo.m
@@ -0,0 +1,127 @@
clear
clc
a = 0.99; % temperature decay coefficient
t0 = 97; tf = 3; t = t0; % initial, final, and current temperature
Markov_length = 10000; % length of the Markov chain at each temperature
coordinates = [
1 565.0 575.0; 2 25.0 185.0; 3 345.0 750.0;
4 945.0 685.0; 5 845.0 655.0; 6 880.0 660.0;
7 25.0 230.0; 8 525.0 1000.0; 9 580.0 1175.0;
10 650.0 1130.0; 11 1605.0 620.0; 12 1220.0 580.0;
13 1465.0 200.0; 14 1530.0 5.0; 15 845.0 680.0;
16 725.0 370.0; 17 145.0 665.0; 18 415.0 635.0;
19 510.0 875.0; 20 560.0 365.0; 21 300.0 465.0;
22 520.0 585.0; 23 480.0 415.0; 24 835.0 625.0;
25 975.0 580.0; 26 1215.0 245.0; 27 1320.0 315.0;
28 1250.0 400.0; 29 660.0 180.0; 30 410.0 250.0;
31 420.0 555.0; 32 575.0 665.0; 33 1150.0 1160.0;
34 700.0 580.0; 35 685.0 595.0; 36 685.0 610.0;
37 770.0 610.0; 38 795.0 645.0; 39 720.0 635.0;
40 760.0 650.0; 41 475.0 960.0; 42 95.0 260.0;
43 875.0 920.0; 44 700.0 500.0; 45 555.0 815.0;
46 830.0 485.0; 47 1170.0 65.0; 48 830.0 610.0;
49 605.0 625.0; 50 595.0 360.0; 51 1340.0 725.0;
52 1740.0 245.0;
];
coordinates(:,1) = []; % drop the city-index column
amount = size(coordinates,1); % number of cities
% Build the distance matrix with vectorized operations.
dist_matrix = zeros(amount, amount);
coor_x_tmp1 = coordinates(:,1) * ones(1,amount);
coor_x_tmp2 = coor_x_tmp1';
coor_y_tmp1 = coordinates(:,2) * ones(1,amount);
coor_y_tmp2 = coor_y_tmp1';
dist_matrix = sqrt((coor_x_tmp1-coor_x_tmp2).^2 + ...
(coor_y_tmp1-coor_y_tmp2).^2);

sol_new = 1:amount; % initial solution (cities visited in index order)
% sol_new is the newly generated candidate solution; sol_current is the current
% solution; sol_best is the best solution found during cooling.
E_current = inf;E_best = inf; % E_current is the tour length of the current solution;
% E_new is the tour length of the new solution;
% E_best is the tour length of the best solution.
sol_current = sol_new; sol_best = sol_new;
p = 1;

while t>=tf
for r=1:Markov_length % one Markov chain is run at each temperature
% Generate a random perturbation of the current tour.
if (rand < 0.5) % randomly choose between a two-exchange and a three-exchange move
% Two-exchange: swap two cities
ind1 = 0; ind2 = 0;
while (ind1 == ind2)
ind1 = ceil(rand.*amount);
ind2 = ceil(rand.*amount);
end
tmp1 = sol_new(ind1);
sol_new(ind1) = sol_new(ind2);
sol_new(ind2) = tmp1;
else
% Three-exchange: move the cities between ind2 and ind3 to just after ind1
ind1 = 0; ind2 = 0; ind3 = 0;
while (ind1 == ind2) || (ind1 == ind3) ...
|| (ind2 == ind3) || (abs(ind1-ind2) == 1)
ind1 = ceil(rand.*amount);
ind2 = ceil(rand.*amount);
ind3 = ceil(rand.*amount);
end
tmp1 = ind1;tmp2 = ind2;tmp3 = ind3;
% Ensure ind1 < ind2 < ind3
if (ind1 < ind2) && (ind2 < ind3)
;
elseif (ind1 < ind3) && (ind3 < ind2)
ind2 = tmp3;ind3 = tmp2;
elseif (ind2 < ind1) && (ind1 < ind3)
ind1 = tmp2;ind2 = tmp1;
elseif (ind2 < ind3) && (ind3 < ind1)
ind1 = tmp2;ind2 = tmp3; ind3 = tmp1;
elseif (ind3 < ind1) && (ind1 < ind2)
ind1 = tmp3;ind2 = tmp1; ind3 = tmp2;
elseif (ind3 < ind2) && (ind2 < ind1)
ind1 = tmp3;ind2 = tmp2; ind3 = tmp1;
end

tmplist1 = sol_new((ind1+1):(ind2-1));
sol_new((ind1+1):(ind1+ind3-ind2+1)) = ...
sol_new((ind2):(ind3));
sol_new((ind1+ind3-ind2+2):ind3) = ...
tmplist1;
end

% Check whether the constraints are satisfied.

% Compute the objective function value (the internal energy), i.e. the tour length.
E_new = 0;
for i = 1 : (amount-1)
E_new = E_new + ...
dist_matrix(sol_new(i),sol_new(i+1));
end
% Add the distance from the last city back to the first city.
E_new = E_new + ...
dist_matrix(sol_new(amount),sol_new(1));

if E_new < E_current
E_current = E_new;
sol_current = sol_new;
if E_new < E_best
% Keep the best solution found during the cooling process.
E_best = E_new;
sol_best = sol_new;
end
else
% If the new solution's objective value is not smaller than the current one,
% accept it only with probability exp(-(E_new-E_current)/t) (Metropolis criterion).
if rand < exp(-(E_new-E_current)./t)
E_current = E_new;
sol_current = sol_new;
else
sol_new = sol_current;
end
end
end
t=t.*a; % reduce the control parameter t (temperature) to a times its previous value
end

disp('Best tour found:')
disp(sol_best)
disp('Shortest tour length:')
disp(E_best)
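A minimal follow-up sketch (not part of the commit; it assumes the script above has just been run, so coordinates, sol_best, and E_best are still in the workspace, and the variable tour is illustrative) to visualize the resulting route:

tour = [sol_best sol_best(1)];   % close the tour back to the starting city
figure;
plot(coordinates(tour,1), coordinates(tour,2), 'o-');
title(sprintf('Best tour found, length = %.1f', E_best));
xlabel('x'); ylabel('y');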
Binary file added 现代算法/神经网络/.DS_Store
13 changes: 13 additions & 0 deletions 现代算法/神经网络/Lecture 2/SIsin.m
@@ -0,0 +1,13 @@
u=[-1:0.05:1];
% t=sin(2*pi*u);
t=0.5+0.5*sin(2*pi*u);
% Try different transfer functions and training algorithms here.
net=newff([-1 1],[10 1],{'tansig','purelin'},'trainlm');
% training parameters
net.trainParam.goal=1e-8;
net.trainParam.epochs=10000;

[net,tr]=train(net,u,t);
y=sim(net,u);
% plot(u,t,'b',u,y,'r');
% nnd9sdq
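A short evaluation sketch (my addition, not in the commit; it assumes u, t, and y are still in the workspace after running the script, and mse_val is an illustrative variable name): uncomment the plot line above, or compare the fit directly:

plot(u,t,'b',u,y,'r--');
legend('target 0.5+0.5*sin(2\pi u)','network output');
mse_val = mean((y-t).^2)   % should approach the 1e-8 training goal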
Binary file added 现代算法/神经网络/Lecture 2/bps.ppt
Binary file added 现代算法/神经网络/lecture 16/alphabet.rar
