人工智能应用技术实验报告人工神经网络程序设计.docx
《人工智能应用技术实验报告人工神经网络程序设计.docx》由会员分享,可在线阅读,更多相关《人工智能应用技术实验报告人工神经网络程序设计.docx(15页珍藏版)》请在冰豆网上搜索。
人工智能应用技术实验报告人工神经网络程序设计
实验报告
课程名称人工智能应用技术
实验项目人工神经网络程序设计
实验仪器 Windows XP、Visual C++
学院信息管理学院
专业信息安全
班级/学号信安1401
学生姓名Cony
实验日期2016-5-10
成绩
指导教师赵刚
北京信息科技大学
信息管理学院
(课程上机)实验报告
实验课程名称:
人工智能应用技术专业:
信息安全班级:
学号:
姓名:
实验名称
人工神经网络程序设计
实验地点
学院机房
实验时间
5/10 第1—4节
1.实验目的:
●掌握基本神经网络的常用学习规则
●掌握人工神经网络的训练过程
2.实验内容:
●相关知识:
基本神经网络(感知器,前馈网络)的常用学习规则
●实验环境:
Windows XP,Visual Studio
●主要内容:
人工神经网络的程序设计与实现
3.实验要求:
●完成神经网络学习程序的调试,课堂演示程序执行结果
●输出神经网络权值调整过程值,分析结果数据,绘制神经网络
●提交实验报告
4.实验准备:
掌握感知器学习算法
1初始化:
将权值向量赋予随机值,t=0(迭代次数)
2连接权的修正:
对每个输入样本xk及期望输出dk完成如下计算
a.计算网络输出:
y=f(S),其中S=∑wixi,f为激活函数
b.计算输出层单元期望输出dk与实际输出y间的误差:
ek=dk-y
c.若ek为零,则说明当前样本输出正确,不必更新权值,否则更新权值:
w(t+1)=w(t)+α×ek×xk
t=t+1
0<α<1为学习率。
3对所有的输入样本重复步骤
(2),直到所有的样本输出正确为止
5.实验过程:
/*
 * Perceptron that learns the two-input logical OR function.
 * Inputs and targets are bipolar (-1.0 / +1.0).
 *
 * NOTE(review): the original listing had an empty #include and a
 * VC++-only "stdafx.h"; replaced with the standard headers actually used.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ITERATIONS 1000                 /* safety cap on training epochs */
#define INPUT_NEURONS  2                    /* two inputs: a and b */
#define NUM_WEIGHTS    (INPUT_NEURONS + 1)  /* +1 for the bias weight */
#define ALPHA          ((double)0.2)        /* learning rate, 0 < ALPHA < 1 */

/* weights[0..1] scale the two inputs; weights[2] is the bias weight. */
double weights[NUM_WEIGHTS];

/* One training sample: the two inputs and the expected (target) output. */
typedef struct {
  double a;
  double b;
  double expected;
} training_data_t;

#define MAX_TESTS 4

/* Full truth table of bipolar OR: -1 only when both inputs are -1. */
training_data_t training_set[MAX_TESTS] = {
  { -1.0, -1.0, -1.0 },
  { -1.0,  1.0,  1.0 },
  {  1.0, -1.0,  1.0 },
  {  1.0,  1.0,  1.0 }
};
/*
 * Compute the perceptron output for training sample 'test'.
 *
 * Forms the weighted sum S = a*w0 + b*w1 + 1.0*w2 (Equation 10.2;
 * the constant 1.0 is the bias input) and applies the bipolar step
 * activation: returns +1.0 if S > 0.0, otherwise -1.0.
 */
double compute(int test)
{
  double sum;

  sum = (training_set[test].a * weights[0]) +
        (training_set[test].b * weights[1]) +
        (1.0 * weights[2]);          /* bias input is always 1.0 */

  return (sum > 0.0) ? 1.0 : -1.0;
}
intmain()
{
inti,test;
doubleoutput;
intchange;
/*Initializetheweightsfortheperceptron*/
for(i=0;i/*Traintheperceptronwiththetrainingset*/
change=1;
while(change){
change=0;
for(test=0;test/*Testontheperceptron*/
output=compute(test);
/*PerceptronLearningAlgorithm*/
doubledif=training_set[test].expected-output;
if((int)training_set[test].expected!
=(int)output){
/*UseEquation10.3*/
weights[0]+=ALPHA*
training_set[test].expected*training_set[test].a;
weights[1]+=ALPHA*
training_set[test].expected*training_set[test].b;
weights[2]+=ALPHA*training_set[test].expected;
change=1;
}
}
}
/*CheckthestatusofthePerceptron*/
for(i=0;iprintf("%gOR%g=%g\n",
training_set[i].a,training_set[i].b,compute(i));
}
return0;
}
#include
#include
#include"maths.c"
#include"rand.h"
#defineINPUT_NEURONS35
#defineHIDDEN_NEURONS10
#defineOUTPUT_NEURONS10
doubleinputs[INPUT_NEURONS+1];
doublehidden[HIDDEN_NEURONS+1];
doubleoutputs[OUTPUT_NEURONS];
#defineRHO(double)0.1
doublew_h_i[HIDDEN_NEURONS][INPUT_NEURONS+1];
doublew_o_h[OUTPUT_NEURONS][HIDDEN_NEURONS+1];
#defineRAND_WEIGHT(((double)rand()/(double)RAND_MAX)-0.5)
#defineIMAGE_SIZE35
typedefstructtest_images_s{
intimage[IMAGE_SIZE];
intoutput[OUTPUT_NEURONS];
}test_image_t;
#defineMAX_TESTS10
test_image_ttests[MAX_TESTS]={
{{0,1,1,1,0,//0
1,0,0,0,1,
1,0,0,0,1,
1,0,0,0,1,
1,0,0,0,1,
1,0,0,0,1,
0,1,1,1,0},
{1,0,0,0,0,0,0,0,0,0}},
{{0,0,1,0,0,//1
0,1,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,1,1,1,0},
{0,1,0,0,0,0,0,0,0,0}},
{{0,1,1,1,0,//2
1,0,0,0,1,
0,0,0,0,1,
0,0,1,1,0,
0,1,0,0,0,
1,0,0,0,0,
1,1,1,1,1},
{0,0,1,0,0,0,0,0,0,0}},
{{0,1,1,1,0,//3
1,0,0,0,1,
0,0,0,0,1,
0,0,1,1,0,
0,0,0,0,1,
1,0,0,0,1,
0,1,1,1,0},
{0,0,0,1,0,0,0,0,0,0}},
{{0,0,0,1,0,//4
0,0,1,1,0,
0,1,0,1,0,
1,1,1,1,1,
0,0,0,1,0,
0,0,0,1,0,
0,0,0,1,0},
{0,0,0,0,1,0,0,0,0,0}},
{{1,1,1,1,1,//5
1,0,0,0,0,
1,0,0,0,0,
1,1,1,1,0,
0,0,0,0,1,
1,0,0,0,1,
0,1,1,1,0},
{0,0,0,0,0,1,0,0,0,0}},
{{0,1,1,1,0,//6
1,0,0,0,0,
1,0,0,0,0,
1,1,1,1,0,
1,0,0,0,1,
1,0,0,0,1,
0,1,1,1,0},
{0,0,0,0,0,0,1,0,0,0}},
{{1,1,1,1,1,//7
1,0,0,0,1,
0,0,0,0,1,
0,0,0,1,0,
0,0,1,0,0,
0,1,0,0,0,
0,1,0,0,0},
{0,0,0,0,0,0,0,1,0,0}},
{{0,1,1,1,0,//8
1,0,0,0,1,
1,0,0,0,1,
0,1,1,1,0,
1,0,0,0,1,
1,0,0,0,1,
0,1,1,1,0},
{0,0,0,0,0,0,0,0,1,0}},
{{0,1,1,1,0,//9
1,0,0,0,1,
1,0,0,0,1,
0,1,1,1,1,
0,0,0,0,1,
0,0,0,1,0,
0,1,1,0,0},
{0,0,0,0,0,0,0,0,0,1}}
};
voidinit_network(void)
{
inti,j;
/*Settheinputbias*/
inputs[INPUT_NEURONS]=1.0;
/*Setthehiddenbias*/
hidden[HIDDEN_NEURONS]=1.0;
/*Initializetheinput->hiddenweights*/
for(j=0;jfor(i=0;iw_h_i[j][i]=RAND_WEIGHT;
}
}
for(j=0;jfor(i=0;iw_o_h[j][i]=RAND_WEIGHT;
}
}
return;
}
voidfeed_forward(void)
{
inti,j;
/*Calculateoutputsofthehiddenlayer*/
for(i=0;ihidden[i]=0.0;
for(j=0;jhidden[i]+=(w_h_i[i][j]*inputs[j]);
}
hidden[i]=sigmoid(hidden[i]);
}
/*Calculateoutputsfortheoutputlayer*/
for(i=0;ioutputs[i]=0.0;
for(j=0;joutputs[i]+=(w_o_h[i][j]*hidden[j]);
}
outputs[i]=sigmoid(outputs[i]);
}
}
voidbackpropagate_error(inttest)
{
intout,hid,inp;
doubleerr_out[OUTPUT_NEURONS];
doubleerr_hid[HIDDEN_NEURONS];
/*Computetheerrorfortheoutputnodes(Equation10.6)*/
for(out=0;outerr_out[out]=((double)tests[test].output[out]-outputs[out])*
sigmoid_d(outputs[out]);
}
/*Computetheerrorforthehiddennodes(Equation10.7)*/
for(hid=0;hiderr_hid[hid]=0.0;
/*Includeerrorcontributionforalloutputnodes*/
for(out=0;outerr_hid[hid]+=err_out[out]*w_o_h[out][hid];
}
err_hid[hid]*=sigmoid_d(hidden[hid]);
}
/*Adjusttheweightsfromthehiddentooutputlayer(Equation10.9)*/
for(out=0;outfor(hid=0;hidw_o_h[out][hid]+=RHO*err_out[out]*hidden[hid];
}
}
/*Adjusttheweightsfromtheinputtohiddenlayer(Equation10.9)*/
for(hid=0;hidfor(inp=0;inpw_h_i[hid][inp]+=RHO*err_hid[hid]*inputs[inp];
}
}
return;
}
doublecalculate_mse(inttest)
{
doublemse=0.0;
inti;
for(i=0;imse+=sqr((tests[test].output[i]-outputs[i]));
}
return(mse/(double)i);
}
voidset_network_inputs(inttest,doublenoise_prob)
{
inti;
/*Fillthenetworkinputsvectorfromthetest*/
for(i=0;iinputs[i]=tests[test].image[i];
/*Inthegivennoiseprobability,negatethecell*/
if(RANDOM()inputs[i]=(inputs[i])?
0:
1;
}
}
return;
}
intclassifier(void)
{
inti,best;
doublemax;
best=0;
max=outputs[0];
for(i=1;iif(outputs[i]>max){
max=outputs[i];
best=i;
}
}
returnbest;
}
intmain(void)
{
doublemse,noise_prob;
inttest,i,j;
RANDINIT();
init_network();
do{
/*Pickatestatrandom*/
test=RANDMAX(MAX_TESTS);
/*Grabinputimage(withnonoise)*/
set_network_inputs(test,0.0);
/*Feedthisdatasetforward*/
feed_forward();
/*Backpropagatetheerror*/
backpropagate_error(test);
/*CalculatethecurrentMSE*/
mse=calculate_mse(test);
}while(mse>0.001);
/*Now,let'stestthenetworkwithincreasingamountsofnoise*/
test=RANDMAX(MAX_TESTS);
/*Startwith5%noiseprobability,endwith25%(perpixel)*/
noise_prob=0.05;
for(i=0;i<5;i++){
set_network_inputs(test,noise_prob);
feed_forward();
for(j=0;jif((j%5)==0)printf("\n");
printf("%d",(int)inputs[j]);
}
printf("\nclassifiedas%d\n\n",classifier());
noise_prob+=0.05;
}
return0;
}
6.实验总结:
(实验结果及分析)
通过人工神经程序设计的学习,我进一步了解了感知器和神经网络算法,包括期望值的调整等内容,同时更加熟练地使用c语言进行程序设计,对程序设计中遇到的各种问题渐渐地有了自己的认识和解决方案。
说明:
1.实验名称、实验目的、实验内容、实验要求由教师确定,实验前由教师事先填好,然后作为实验报告模版供学生使用;
2.实验准备由学生在实验或上机之前填写,教师应该在实验前检查;
3.实验过程由学生记录实验的过程,包括操作过程、遇到哪些问题以及如何解决等;
4.实验总结由学生在实验后填写,总结本次实验的收获、未解决的问题以及体会和建议等;
5.源程序、代码、具体语句等,若表格空间不足时可作为附录另外附页。