BP Neural Network: A Detailed Step-by-Step C# Implementation

The listing below defines a BpNet class, a three-layer feed-forward network trained with error backpropagation, followed by a small driver program (Class1.Main) that trains the network on a short time series and then uses it for prediction.

using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;

namespace BpANNet
{
    /// <summary>
    /// Summary description for BpNet.
    /// </summary>
    public class BpNet
    {

        public int inNum;        // number of input nodes
        int hideNum;             // number of hidden nodes
        public int outNum;       // number of output nodes
        public int sampleNum;    // total number of training samples
        Random R;
        double[] x;              // inputs presented to the input nodes
        double[] x1;             // outputs of the hidden nodes
        double[] x2;             // outputs of the output nodes
        double[] o1;             // net inputs of the hidden layer
        double[] o2;             // net inputs of the output layer
        public double[,] w;      // weight matrix w between the input and hidden layer
        public double[,] v;      // weight matrix v between the hidden and output layer
        public double[,] dw;     // weight increments for w
        public double[,] dv;     // weight increments for v
        public double rate;      // learning rate
        public double[] b1;      // hidden-layer thresholds (biases)
        public double[] b2;      // output-layer thresholds (biases)
        public double[] db1;     // hidden-layer threshold increments
        public double[] db2;     // output-layer threshold increments
        double[] pp;             // hidden-layer errors
        double[] qq;             // output-layer errors
        double[] yd;             // teacher data for the output layer; the "teacher data" are simply the actual target values
        public double e;         // overall network error (square root of the summed squared error)
        double in_rate;          // normalization factor

        // determine the number of hidden neurons from the number of
        // input nodes m and output nodes n (an empirical formula)
        public int computeHideNum(int m, int n)
        {
            double s = Math.Sqrt(0.43 * m * n + 0.12 * n * n + 2.54 * m + 0.77 * n + 0.35) + 0.51;
            int ss = Convert.ToInt32(s);
            return ((s - (double)ss) > 0.5) ? ss + 1 : ss;
        }
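
        // Added illustration (not part of the original code): for the sample data used in
        // Main below, m = 6 inputs and n = 1 output, so
        //   s  = Math.Sqrt(0.43*6*1 + 0.12*1 + 2.54*6 + 0.77*1 + 0.35) + 0.51
        //      = Math.Sqrt(19.06) + 0.51 ≈ 4.88
        //   ss = Convert.ToInt32(4.88) = 5, and because s - ss < 0.5 the method returns 5,
        // i.e. the constructor builds a 6-5-1 network for that data.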

        public BpNet(double[,] p, double[,] t)
        {
            // constructor logic
            R = new Random();
            this.inNum = p.GetLength(1);
            this.outNum = t.GetLength(1);
            this.hideNum = computeHideNum(inNum, outNum);
            // this.hideNum = 18;
            this.sampleNum = p.GetLength(0);
            Console.WriteLine("Number of input nodes: " + inNum);
            Console.WriteLine("Number of hidden nodes: " + hideNum);
            Console.WriteLine("Number of output nodes: " + outNum);
            Console.ReadLine();

            // allocate the vectors and matrices
            x = new double[inNum];
            x1 = new double[hideNum];
            x2 = new double[outNum];
            o1 = new double[hideNum];
            o2 = new double[outNum];
            w = new double[inNum, hideNum];    // weights between the input and hidden layer
            v = new double[hideNum, outNum];   // weights between the hidden and output layer
            dw = new double[inNum, hideNum];
            dv = new double[hideNum, outNum];
            // thresholds (biases)
            b1 = new double[hideNum];
            b2 = new double[outNum];
            db1 = new double[hideNum];
            db2 = new double[outNum];
            // errors
            pp = new double[hideNum];          // hidden-layer errors
            qq = new double[outNum];           // output-layer errors
            yd = new double[outNum];           // teacher (target) data of the output layer

            // initialize w
            for (int i = 0; i < inNum; i++)
            {
                for (int j = 0; j < hideNum; j++)
                {
                    // NextDouble returns a random number between 0.0 and 1.0,
                    // so (R.NextDouble() * 2 - 1.0) / 2 lies in [-0.5, 0.5)
                    w[i, j] = (R.NextDouble() * 2 - 1.0) / 2;
                }
            }
            // initialize v
            for (int i = 0; i < hideNum; i++)
            {
                for (int j = 0; j < outNum; j++)
                {
                    v[i, j] = (R.NextDouble() * 2 - 1.0) / 2;
                }
            }
            rate = 0.8;
            e = 0.0;
            in_rate = 1.0;
        }

        // training function
        public void train(double[,] p, double[,] t)
        {
            e = 0.0;
            // find the largest absolute value in p and t
            double pMax = 0.0;
            // sampleNum is the total number of samples
            for (int isamp = 0; isamp < sampleNum; isamp++)
            {
                // inNum is the number of input nodes (neurons)
                for (int i = 0; i < inNum; i++)
                {
                    if (Math.Abs(p[isamp, i]) > pMax)
                    {
                        pMax = Math.Abs(p[isamp, i]);
                    }
                }
                for (int j = 0; j < outNum; j++)
                {
                    if (Math.Abs(t[isamp, j]) > pMax)
                    {
                        pMax = Math.Abs(t[isamp, j]);
                    }
                }
                in_rate = pMax;
            } // end isamp: in_rate now holds the largest absolute value
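
            // Added illustration (not in the original): with the data in Main below,
            // sampleNum = 5 is taken from p1, so only the first five rows of t1 are scanned;
            // the largest absolute value encountered is 0.1789, hence in_rate = 0.1789 and
            // every input and target below is divided by that value.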

            for (int isamp = 0; isamp < sampleNum; isamp++)
            {
                // normalize the data
                for (int i = 0; i < inNum; i++)
                {
                    x[i] = p[isamp, i] / in_rate;
                }
                for (int i = 0; i < outNum; i++)
                {
                    yd[i] = t[isamp, i] / in_rate;
                }
                // compute the input and output of the hidden layer
                for (int j = 0; j < hideNum; j++)
                {
                    o1[j] = 0.0;
                    for (int i = 0; i < inNum; i++)
                    {
                        o1[j] += w[i, j] * x[i];   // accumulate weight * input
                    }
                    // b1[j] is the hidden-layer threshold (bias), added inside the sigmoid
                    x1[j] = 1.0 / (1.0 + Math.Exp(-o1[j] - b1[j]));
                }
                // compute the input and output of the output layer
                for (int k = 0; k < outNum; k++)
                {
                    o2[k] = 0.0;
                    for (int j = 0; j < hideNum; j++)
                    {
                        o2[k] += v[j, k] * x1[j];
                    }
                    x2[k] = 1.0 / (1.0 + Math.Exp(-o2[k] - b2[k]));
                }
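
                // Added note (not in the original): written as formulas, the forward pass is
                //   x1[j] = sigma( sum_i w[i,j] * x[i] + b1[j] )
                //   x2[k] = sigma( sum_j v[j,k] * x1[j] + b2[k] )
                // with the logistic activation sigma(z) = 1 / (1 + exp(-z)).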

                // compute the output-layer error and accumulate the squared error
                for (int k = 0; k < outNum; k++)
                {
                    // yd[k] is the teacher data, i.e. the value the network should actually output
                    qq[k] = (yd[k] - x2[k]) * x2[k] * (1.0 - x2[k]);
                    e += (yd[k] - x2[k]) * (yd[k] - x2[k]);
                    // update v, the weight matrix between the hidden and output layer
                    for (int j = 0; j < hideNum; j++)
                    {
                        v[j, k] += rate * qq[k] * x1[j];
                    }
                }
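
                // Added note (not in the original): qq[k] is the standard output delta of
                // backpropagation for a squared-error cost and a logistic unit,
                //   delta_k = (target_k - output_k) * output_k * (1 - output_k),
                // where output_k * (1 - output_k) is the sigmoid derivative; the update
                // v[j,k] += rate * delta_k * x1[j] then moves v along the error gradient.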

                // compute the hidden-layer error
                for (int j = 0; j < hideNum; j++)
                {
                    // pp is the hidden-layer error
                    pp[j] = 0.0;
                    // back-propagate the output deltas through v
                    // (the original comment refers to the author's video screenshot for this step)
                    for (int k = 0; k < outNum; k++)
                    {
                        pp[j] += qq[k] * v[j, k];
                    }
                    pp[j] = pp[j] * x1[j] * (1 - x1[j]);
                    // update w
                    for (int i = 0; i < inNum; i++)
                    {
                        w[i, j] += rate * pp[j] * x[i];
                    }
                }
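
                // Added note (not in the original): in formula form the hidden delta is
                //   pp[j] = x1[j] * (1 - x1[j]) * sum_k qq[k] * v[j,k],
                // i.e. the output deltas propagated back through v, scaled by the derivative
                // of the hidden unit's sigmoid.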

                // update b2, the output-layer thresholds
                for (int k = 0; k < outNum; k++)
                {
                    b2[k] += rate * qq[k];
                }
                // update b1, the hidden-layer thresholds
                for (int j = 0; j < hideNum; j++)
                {
                    b1[j] += rate * pp[j];
                }
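
                // Added note (not in the original): because the net input is formed as o + b
                // before the sigmoid, each threshold behaves like a weight on a constant
                // input of +1 and is therefore updated with the same rule, rate * delta.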

            } // end isamp
            e = Math.Sqrt(e);   // square root of the accumulated squared error
            // adjustWV(w, dw);
            // adjustWV(v, dv);
        } // end train

        public void adjustWV(double[,] w, double[,] dw)
        {
            for (int i = 0; i < w.GetLength(0); i++)
            {
                for (int j = 0; j < w.GetLength(1); j++)
                {
                    w[i, j] += dw[i, j];
                }
            }
        }

        public void adjustWV(double[] w, double[] dw)
        {
            for (int i = 0; i < w.Length; i++)
            {
                w[i] += dw[i];
            }
        }

        // simulation (prediction) function
        public double[] sim(double[] psim)
        {
            for (int i = 0; i < inNum; i++)
                x[i] = psim[i] / in_rate;          // in_rate is the normalization factor
            for (int j = 0; j < hideNum; j++)
            {
                o1[j] = 0.0;
                for (int i = 0; i < inNum; i++)
                    o1[j] = o1[j] + w[i, j] * x[i];
                x1[j] = 1.0 / (1.0 + Math.Exp(-o1[j] - b1[j]));
            }
            for (int k = 0; k < outNum; k++)
            {
                o2[k] = 0.0;
                for (int j = 0; j < hideNum; j++)
                    o2[k] = o2[k] + v[j, k] * x1[j];
                x2[k] = 1.0 / (1.0 + Math.Exp(-o2[k] - b2[k]));
                x2[k] = in_rate * x2[k];           // rescale the output back to the original range
            }
            return x2;
        } // end sim
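
        // Added example (not part of the original class): Main below calls sim exactly like
        // this for the second row of p2, predicting the value that follows the last six
        // observations of the series:
        //   double[] next = bp.sim(new double[] { 0.1622, 0.1611, 0.1615, 0.1685, 0.1789, 0.1790 });
        // A single de-normalized prediction is returned because outNum = 1.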

        // save a weight matrix (w or v) to a text file
        public void saveMatrix(double[,] w, string filename)
        {
            StreamWriter sw = File.CreateText(filename);
            for (int i = 0; i < w.GetLength(0); i++)
            {
                for (int j = 0; j < w.GetLength(1); j++)
                {
                    sw.Write(w[i, j] + " ");
                }
                sw.WriteLine();
            }
            sw.Close();
        }

        // save a threshold vector (b1 or b2) to a text file
        public void saveMatrix(double[] b, string filename)
        {
            StreamWriter sw = File.CreateText(filename);
            for (int i = 0; i < b.Length; i++)
            {
                // one value per line, so that readMatrixB below can read it back
                sw.WriteLine(b[i]);
            }
            sw.Close();
        }

        // read a weight matrix (w or v) back from a text file
        public void readMatrixW(double[,] w, string filename)
        {
            StreamReader sr;
            try
            {
                sr = new StreamReader(filename, Encoding.GetEncoding("gb2312"));
                String line;
                int i = 0;
                while ((line = sr.ReadLine()) != null)
                {
                    string[] s1 = line.Trim().Split(' ');
                    for (int j = 0; j < w.GetLength(1); j++)
                    {
                        w[i, j] = Convert.ToDouble(s1[j]);
                    }
                    i++;
                }
                sr.Close();
            }
            catch (Exception e)
            {
                // Let the user know what went wrong.
                Console.WriteLine("The file could not be read:");
                Console.WriteLine(e.Message);
            }
        }

        // read a threshold vector (b1 or b2) back from a text file
        public void readMatrixB(double[] b, string filename)
        {
            StreamReader sr;
            try
            {
                sr = new StreamReader(filename, Encoding.GetEncoding("gb2312"));
                String line;
                int i = 0;
                while ((line = sr.ReadLine()) != null)
                {
                    b[i] = Convert.ToDouble(line);
                    i++;
                }
                sr.Close();
            }
            catch (Exception e)
            {
                // Let the user know what went wrong.
                Console.WriteLine("The file could not be read:");
                Console.WriteLine(e.Message);
            }
        }

    } // end class BpNet
} // end namespace

// driver program
namespace BpANNet
{

    /// <summary>
    /// Summary description for Class1.
    /// </summary>
    class Class1
    {
        /// <summary>
        /// Main entry point of the application.
        /// </summary>
        [STAThread]
        static void Main(string[] args)
        {

            // the full series: 0.1399, 0.1467, 0.1567, 0.1595, 0.1588, 0.1622, 0.1611,
            // 0.1615, 0.1685, 0.1789, 0.1790
            // double[,] p1 = new double[,] { {0.05,0.02},{0.09,0.11},{0.12,0.20},{0.15,0.22},{0.20,0.25},{0.75,0.75},{0.80,0.83},{0.82,0.80},{0.90,0.89},{0.95,0.89},{0.09,0.04},{0.1,0.1},{0.14,0.21},{0.18,0.24},{0.22,0.28},{0.77,0.78},{0.79,0.81},{0.84,0.82},{0.94,0.93},{0.98,0.99} };
            // double[,] t1 = new double[,] { {1,0},{1,0},{1,0},{1,0},{1,0},{0,1},{0,1},{0,1},{0,1},{0,1},{1,0},{1,0},{1,0},{1,0},{1,0},{0,1},{0,1},{0,1},{0,1},{0,1} };
            // p1 is the input: 5 samples of 6 values each (6 input nodes), p1[5][6]
            double[,] p1 = new double[,] {
                {0.1399, 0.1467, 0.1567, 0.1595, 0.1588, 0.1622},
                {0.1467, 0.1567, 0.1595, 0.1588, 0.1622, 0.1611},
                {0.1567, 0.1595, 0.1588, 0.1622, 0.1611, 0.1615},
                {0.1595, 0.1588, 0.1622, 0.1611, 0.1615, 0.1685},
                {0.1588, 0.1622, 0.1611, 0.1615, 0.1685, 0.1789}};
            // t1 holds the target outputs, t1[6][1]; note that only the first five rows
            // (matching the five rows of p1) are actually used during training
            double[,] t1 = new double[,] {
                {0.1622},
                {0.1611},
                {0.1615},
                {0.1685},
                {0.1789},
                {0.1790}};

            BpNet bp = new BpNet(p1, t1);
            int study = 0;
            do
            {
                study++;
                bp.train(p1, t1);
                // bp.rate = 0.95 - (0.95 - 0.3) * study / 50000;
                // Console.Write("Learning pass " + study + ": ");
                // Console.WriteLine("error = " + bp.e);
            } while (bp.e > 0.001 && study < 50000);
            Console.Write("Learning pass " + study + ": ");
            Console.WriteLine("error = " + bp.e);
            bp.saveMatrix(bp.w, "w.txt");
            bp.saveMatrix(bp.v, "v.txt");
            bp.saveMatrix(bp.b1, "b1.txt");
            bp.saveMatrix(bp.b2, "b2.txt");

            // double[,] p2 = new double[,] { {0.05,0.02},{0.09,0.11},{0.12,0.20},{0.15,0.22},{0.20,0.25},{0.75,0.75},{0.80,0.83},{0.82,0.80},{0.90,0.89},{0.95,0.89},{0.09,0.04},{0.1,0.1},{0.14,0.21},{0.18,0.24},{0.22,0.28},{0.77,0.78},{0.79,0.81},{0.84,0.82},{0.94,0.93},{0.98,0.99} };
            // test inputs: the first training sample and the six most recent observations
            double[,] p2 = new double[,] {
                {0.1399, 0.1467, 0.1567, 0.1595, 0.1588, 0.1622},
                {0.1622, 0.1611, 0.1615, 0.1685, 0.1789, 0.1790}};

            int aa = bp.inNum;
            int bb = bp.outNum;
            int cc = p2.GetLength(0);
            double[] p21 = new double[aa];
            double[] t2 = new double[bb];
            for (int n = 0; n < cc; n++)
            {
                for (int i = 0; i < aa; i++)
                {
                    p21[i] = p2[n, i];
                }
                t2 = bp.sim(p21);
                for (int i = 0; i < bb; i++)
                {
                    Console.WriteLine(t2[i] + " ");
                }
            }
            Console.ReadLine();
        }
    }
} // end namespace
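
The listing saves the trained weights and thresholds to w.txt, v.txt, b1.txt and b2.txt but never shows them being loaded again. The sketch below is not part of the original document; it is one plausible way to reuse those files, assuming the same p1 and t1 arrays as in Main. Because the normalization factor in_rate is a private field that is only set inside train(), the sketch runs one training pass first so the factor matches the data, and then lets the stored matrices overwrite the weights from that pass.

// Sketch only (assumes the files written by Main and the p1/t1 arrays defined there).
BpNet bp2 = new BpNet(p1, t1);      // same 6-5-1 topology as the trained network
bp2.train(p1, t1);                  // one pass, just to set the private in_rate
bp2.readMatrixW(bp2.w, "w.txt");    // the stored weights overwrite that pass's weights
bp2.readMatrixW(bp2.v, "v.txt");
bp2.readMatrixB(bp2.b1, "b1.txt");
bp2.readMatrixB(bp2.b2, "b2.txt");
double[] yNext = bp2.sim(new double[] { 0.1622, 0.1611, 0.1615, 0.1685, 0.1789, 0.1790 });
Console.WriteLine(yNext[0]);        // de-normalized prediction for the next value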
