A BP Neural Network Implemented in C++ (Code)
Source: 天新网-开发中心 (Tianxin Net developer center); published 2009-06-18
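/* The listing below implements a three-layer back-propagation (BP) network with
   3 inputs, 8 hidden nodes and 1 output (netout() and the other routines are
   called with m=8 hidden nodes and n=1 output node), trained on the data rows
   of the sample[][] table. */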
#pragma hdrstop
#include <cstdio>
#include <cmath>    // exp(), pow()
#include <cstdlib>  // rand(), srand()
#include <ctime>    // time(), used to seed the random generator
#include <iostream>
using namespace std;
const double A=30.0;           // amplitude of the sigmoid activation
const double B=10.0;           // slope (temperature) of the sigmoid activation
const int    MAX=500;          // maximum number of training epochs
const double COEF=0.0035;      // learning rate for the weights
const double BCOEF=0.001;      // learning rate for the thresholds
const double ERROR=0.002;      // per-sample error allowed during training
const double ACCURACY=0.0005;  // required accuracy (mean squared error) of the network
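/* Sample table: columns 0-2 hold the three input variables and column 3 the
   target output; the table has 41 rows, of which row 0 is all zeros. */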
double sample[41][4]={{0,0,0,0},{5,1,4,19.020},{5,3,3,14.150},
{5,5,2,14.360},{5,3,3,14.150},{5,3,2,15.390},
{5,3,2,15.390},{5,5,1,19.680},{5,1,2,21.060},
{5,3,3,14.150},{5,5,4,12.680},{5,5,2,14.360},
{5,1,3,19.610},{5,3,4,13.650},{5,5,5,12.430},
{5,1,4,19.020},{5,1,4,19.020},{5,3,5,13.390},
{5,5,4,12.680},{5,1,3,19.610},{5,3,2,15.390},
{1,3,1,11.110},{1,5,2,6.521},{1,1,3,10.190},
{1,3,4,6.043},{1,5,5,5.242},{1,5,3,5.724},
{1,1,4,9.766},{1,3,5,5.870},{1,5,4,5.406},
{1,1,3,10.190},{1,1,5,9.545},{1,3,4,6.043},
{1,5,3,5.724},{1,1,2,11.250},{1,3,1,11.110},
{1,3,3,6.380},{1,5,2,6.521},{1,1,1,16.000},
{1,3,2,7.219},{1,5,3,5.724}};
double w[4][10][10],wc[4][10][10],b[4][10],bc[4][10]; // weights, weight changes, thresholds, threshold changes
double o[4][10],netin[4][10],d[4][10],differ; // layer outputs, net inputs, back-propagated errors, error of a single sample
double is;    // mean squared error over the sample set
int count,a;  // sample counter and index of the sample currently presented
void netout(int m, int n);   // compute the outputs of the hidden layer and the output layer
void calculd(int m,int n);   // compute the back-propagated errors of the network
void calculwc(int m,int n);  // compute the weight adjustments
void calculbc(int m,int n);  // compute the threshold adjustments
void changw(int m,int n);    // apply the weight adjustments
void changb(int m,int n);    // apply the threshold adjustments
void clearwc(void);          // clear the weight-change array wc
void clearbc(void);          // clear the threshold-change array bc
void initialw(void);         // initialize the network weights w
void initialb(void);         // initialize the network thresholds b
void calculdiffer(void);     // compute the error of a single sample
void calculis(void);         // compute the error over the sample set
void trainNN(void);          // train the network
/* Compute the outputs of the hidden layer and the output layer */
void netout(int m,int n)
{
int i,j,k;
// outputs of the hidden-layer nodes
for (j=1,i=2;j<=m;j++) // m is the number of hidden-layer nodes
{
netin[i][j]=0.0;
for(k=1;k<=3;k++) // each hidden node has three input variables
netin[i][j]=netin[i][j]+o[i-1][k]*w[i][k][j];
netin[i][j]=netin[i][j]-b[i][j];
o[i][j]=A/(1+exp(-netin[i][j]/B));
}
// outputs of the output-layer nodes
for (j=1,i=3;j<=n;j++)
{
netin[i][j]=0.0;
for (k=1;k<=m;k++)
netin[i][j]=netin[i][j]+o[i-1][k]*w[i][k][j];
netin[i][j]=netin[i][j]-b[i][j];
o[i][j]=A/(1+exp(-netin[i][j]/B)) ;
}
}
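/* The activation used in both layers is a scaled sigmoid, o = A/(1+exp(-net/B)),
   so every node output lies in (0, A); layer index 1 is the input layer,
   2 the hidden layer and 3 the output layer. */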
/* Compute the back-propagated errors (deltas) of the network */
void calculd(int m,int n)
{
int i,j,k;
double t;
a=count-1; // index of the sample currently presented
// delta of the output node: (output - target) * derivative of the activation
d[3][1]=(o[3][1]-sample[a][3])*(A/B)*exp(-netin[3][1]/B)/pow(1+exp(-netin[3][1]/B),2);
// deltas of the hidden-layer nodes
for (j=1,i=2;j<=m;j++)
{
t=0.00;
for (k=1;k<=n;k++)
t=t+w[i+1][j][k]*d[i+1][k];
d[i][j]=t*(A/B)*exp(-netin[i][j]/B)/pow(1+exp(-netin[i][j]/B),2);
}
}
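/* For f(x) = A/(1+exp(-x/B)) the derivative is
   f'(x) = (A/B)*exp(-x/B)/(1+exp(-x/B))^2, which is the factor that appears in
   both delta expressions above. */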
/* Compute the adjustments of the network weights w */
void calculwc(int m,int n)
{
int i,j,k;
// adjustment of the connection weights between the output layer (layer 3) and the hidden layer (layer 2)
for (i=1,k=3;i<=m;i++)
{
for (j=1;j<=n;j++)
{
wc[k][i][j]=-COEF*d[k][j]*o[k-1][i]+0.5*wc[k][i][j];
}
}
// adjustment of the connection weights between the hidden layer (layer 2) and the input layer (layer 1)
for (i=1,k=2;i<=3;i++) // the input layer has three nodes
{
for (j=1;j<=m;j++)
{
wc[k][i][j]=-COEF*d[k][j]*o[k-1][i]+0.5*wc[k][i][j];
}
}
}
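/* The term 0.5*wc[...] in both loops adds half of the previous weight change to
   the new one, i.e. a simple momentum term with coefficient 0.5. */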
/* Compute the adjustments of the network thresholds */
void calculbc(int m,int n)
{
int j;
for (j=1;j<=m;j++)
{
bc[2][j]=BCOEF*d[2][j];
}
for (j=1;j<=n;j++)
{
bc[3][j]=BCOEF*d[3][j];
}
}
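/* Since the net input of a node is net = sum(w*o) - b, the gradient of the
   error with respect to b is -d, so adding BCOEF*d to b (see changb below) is a
   gradient-descent step on the threshold. */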
/* Apply the weight adjustments */
void changw(int m,int n)
{
int i,j;
for (i=1;i<=3;i++)
for (j=1;j<=m;j++)
{
w[2][i][j]=0.9*w[2][i][j]+wc[2][i][j];
// the old weight is scaled by an inertia coefficient of 0.9 to give the system better robustness
printf("w[2][%d][%d]=%f\n",i,j,w[2][i][j]);
}
for (i=1;i<=m;i++)
for (j=1;j<=n;j++)
{
w[3][i][j]=0.9*w[3][i][j]+wc[3][i][j];
printf("w[3][%d][%d]=%f\n",i,j,w[3][i][j]);
}
}
/* Apply the threshold adjustments */
void changb(int m,int n)
{
int j;
for (j=1;j<=m;j++)
b[2][j]=b[2][j]+bc[2][j];
for (j=1;j<=n;j++)
b[3][j]=b[3][j]+bc[3][j];
}
/* Clear the weight-change array wc */
void clearwc(void)
{
for (int i=0;i<4;i++)
for (int j=0;j<10;j++)
for (int k=0;k<10;k++)
wc[i][j][k]=0.00;
}
/* Clear the threshold-change array bc */
void clearbc(void)
{
for (int i=0;i<4;i++)
for (int j=0;j<10;j++)
bc[i][j]=0.00;
}
/* Initialize the network weights w */
void initialw(void)
{
int i,j,k,x;
double weight;
srand((unsigned)time(NULL)); // seed the random generator once
for (i=0;i<4;i++)
for (j=0;j<10;j++)
for (k=0;k<10;k++)
{
x=100+rand()%400;           // random integer in [100,499]
weight=(double)x/5000.00;   // small initial weight in [0.02,0.10)
w[i][j][k]=weight;
}
}
/* Initialize the network thresholds */
void initialb(void)
{
int i,j,x;
double fazhi;               // "fazhi" means threshold
for (i=0;i<4;i++)
for (j=0;j<10;j++)
{
x=100+rand()%400;           // random integer in [100,499]
fazhi=(double)x/50000.00;   // small initial threshold in [0.002,0.01)
b[i][j]=fazhi;
}
}
/* Compute the error of a single sample */
void calculdiffer(void)
{
a=count-1;
differ=0.5*(o[3][1]-sample[a][3])*(o[3][1]-sample[a][3]);
}
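/* Compute the mean squared error of the network over the sample set; note that
   only the first 20 rows of the table (indices 0-19) are evaluated here. */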
void calculis(void)
{
int i;
is=0.0;
for (i=0;i<=19;i++)
{
o[1][1]=sample[i][0];
o[1][2]=sample[i][1];
o[1][3]=sample[i][2];
netout(8,1);
is=is+(o[3][1]-sample[i][3])*(o[3][1]-sample[i][3]);
}
is=is/20;
}
/* Train the network */
void trainNN(void)
{
long int time; // training epoch counter
initialw();
initialb();
for (time=1;time<=MAX;time++)
{
count=0;
while(count<=40)
{
o[1][1]=sample[count][0];
o[1][2]=sample[count][1];
o[1][3]=sample[count][2];
count=count+1;
clearwc();
clearbc();
netout(8,1);
calculdiffer();
while(differ>ERROR)
{
calculd(8,1);
calculwc(8,1);
calculbc(8,1);
changw(8,1);
changb(8,1);
netout(8,1);
calculdiffer();
}
}
printf("This is %d times training NN...\n",time);
calculis();
printf("is==%f\n",is);
if (is<ACCURACY) break;
}
}
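/* Training scheme: in every epoch each sample is presented in turn and the
   weights are updated repeatedly for that one sample until its error drops
   below ERROR; the epoch loop stops after MAX epochs or as soon as the mean
   squared error reported by calculis() falls below ACCURACY. */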
//---------------------------------------------------------------------------
#pragma argsused
int main(int argc, char* argv[])
{
double result;
int m,test[4];
char ch='y';
cout<<"Please wait for the train of NN:"<<endl;
trainNN();
cout<<"Now,this modular network can work for you."<<endl;
while(ch=='y' || ch=='Y')
{
cout<<"Please input data to be tested."<<endl;
for (m=1;m<=3;m++)
cin>>test[m];
ch=getchar();
o[1][1]=test[1];
o[1][2]=test[2];
o[1][3]=test[3];
netout(8,1);
result=o[3][1];
printf("Final result is %f.\n",result);
printf("Still test?[Yes] or [No]\n");
ch=getchar();
}
return 0;
}
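To build and run the listing with a modern compiler (the file name bpnn.cpp is only an example):

g++ bpnn.cpp -o bpnn
./bpnn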