(3) B(k)[x,y] = 1, 若 b(k,k-1)[x,y] == 1 且 b(k+1,k)[x,y] == 1;
    B(k)[x,y] = 0, 否则(即两个差分二值图中任一为 0 时)。
到了这里,比较关键的就是第2步的阈值T的选取问题,单纯用otsu算法分割貌似效果不太好,如果手动设置一个较小的值(如10)效果
还可以,但手动设置有一定的限制性。
接下来要研究局部阈值的选取。
用otsu取阈值实现的一个三帧差法代码。
效果不是很好。
#include "highgui.h"
#include "cv.h"
#include "cxcore.h"
#include "cvaux.h"
// NOTE(review): the original paste lost the header names on the standard
// includes below; these are the ones the code actually needs (cout/endl,
// memset, DWORD/GetTickCount) -- confirm against the original source.
#include <iostream>
#include <cstring>
#include <windows.h>
using namespace std;
//#pragma comment(lib, "highgui200.lib")
//#pragma comment(lib, "cv200.lib")
//#pragma comment(lib, "cxcore200.lib")
//#pragma comment(lib, "cvaux200.lib")
// Fetch the 8-bit pixel at (x, y) of a single-channel IplImage.
#define GET_IMAGE_DATA(img, x, y) ((uchar *)((img)->imageData + (img)->widthStep * (y)))[x]
int T = 10;   // manual fallback threshold (otsu() normally supplies the value)
int Num[300]; // after InitPixel: cumulative histogram, Num[i] = #pixels <= i
int Sum[300]; // after InitPixel: cumulative weighted sum, Sum[i] = sum of v*count(v), v <= i
voidInitPixel(IplImage*img,int&_low,int&_top)
{
memset(Num,0,sizeof(Num));
memset(Sum,0,sizeof(Sum));
_low=255;
_top=0;
for(inti=0;iheight;i++)
{
for(intj=0;jwidth;j++)
{
inttemp=((uchar*)(img->imageData+img->widthStep*i))[j];
if(temp<_low)
_low=temp;
if(temp>_top)
_top=temp;
Num[temp]+=1;
}
}
for(inti=1;i<256;i++)
{
Sum[i]=Sum[i-1]+i*Num[i];
Num[i]+=Num[i-1];
}
}
intotsu(IplImage*img)
{
int_low,_top,mbest=0;
floatmn=img->height*img->width;
InitPixel(img,_low,_top);
floatmax_otsu=0;
mbest=0;
if(_low==_top)
mbest=_low;
else
{
for(inti=_low;i<_top;i++)
{
floatw0=(float)((Num[_top]-Num[i])/mn);
floatw1=1-w0;
floatu0=(float)((Sum[_top]-Sum[i])/(Num[_top]-Num[i]));
floatu1=(float)(Sum[i]/Num[i]);
floatu=w0*u0+w1*u1;
floatg=w0*(u0-u)*(u0-u)+w1*(u1-u)*(u1-u);
if(g>max_otsu)
{
mbest=i;
max_otsu=g;
}
}
}
returnmbest;
}
intmain()
{
intncount=0;
IplImage*image1=NULL;
IplImage*image2=NULL;
IplImage*image3=NULL;
IplImage*Imask=NULL;
IplImage*Imask1=NULL;
IplImage*Imask2=NULL;
IplImage*Imask3=NULL;
IplImage*mframe=NULL;
CvCapture*capture=cvCreateFileCapture("E:
\\Motion\\IndoorGTTest2.avi");
//CvCapture*capture=cvCreateCameraCapture(0);
cvNamedWindow("src");
cvNamedWindow("dst");
cvNamedWindow("Imask1");
cvNamedWindow("Imask2");
cvNamedWindow("Imask3");
//cvCreateTrackbar("T","dst",&T,255,0);
while(mframe=cvQueryFrame(capture))
{
DWORDstart=GetTickCount();
if(ncount>1000000000)
ncount=100;
ncount+=1;
if(ncount==1)
{
image1=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
image2=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
image3=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
Imask=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
Imask1=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
Imask2=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
Imask3=cvCreateImage(cvGetSize(mframe),IPL_DEPTH_8U,1);
cvCvtColor(mframe,image1,CV_BGR2GRAY);
}
if(ncount==2)
cvCvtColor(mframe,image2,CV_BGR2GRAY);
if(ncount>=3)
{
if(ncount==3)
cvCvtColor(mframe,image3,CV_BGR2GRAY);
else
{
cvCopy(image2,image1);
cvCopy(image3,image2);
cvCvtColor(mframe,image3,CV_BGR2GRAY);
}
cvAbsDiff(image2,image1,Imask1);
cvAbsDiff(image3,image2,Imask2);
//cvShowImage("Imask1",Imask1);
//cvShowImage("Imask2",Imask2);
intmbest1=otsu(Imask1);
cvSmooth(Imask1,Imask1,CV_MEDIAN);
cvThreshold(Imask1,Imask1,mbest1,255,CV_THRESH_BINARY);
intmbest2=otsu(Imask2);
cvSmooth(Imask2,Imask2,CV_MEDIAN);
cvThreshold(Imask2,Imask2,mbest2,255,CV_THRESH_BINARY);
cout<cvAnd(Imask1,Imask2,Imask);
/*cvErode(Imask,Imask);
cvDilate(Imask,Imask);*/
DWORDfinish=GetTickCount();
//cout<cvShowImage("src",image2);
cvShowImage("dst",Imask);
}
charc=cvWaitKey(30);
if(c==27)
break;
}
return0;
}\
#include"stdafx.h"
#include"cv.h"
#include"cxcore.h"
#include"highgui.h"
#include"stdio.h"
intmain(intargc,_TCHAR*argv[])
{
IplImage*pFrame=NULL;
IplImage*pFrImg1=NULL;
IplImage*pFrImg2=NULL;
IplImage*pFrImg3=NULL;
IplImage*result1=NULL;
IplImage*result2=NULL;
IplImage*result=NULL;
IplImage*tempImg1=NULL;
CvSizesize;
CvMat*pFrMat1=NULL;
CvMat*pFrMat2=NULL;
CvMat*pFrMat3=NULL;
CvMat*reMat1=NULL;
CvMat*reMat2=NULL;
CvMat*reMat=NULL;
CvMat*tempMat1=NULL;
CvCapture*pCapture=NULL;
intnFrmNum=0;
char*reWin="effect"+nFrmNum;
cvNamedWindow("video",1);
cvNamedWindow("effect",1);
cvMoveWindow("video",0,0);
cvMoveWindow("effect",400,400);
if(argc!
=1)
{
fprintf(stderr,"Usage:
bkgrd\n");
return-1;
}
if(!
(pCapture=cvCaptureFromFile("F:
\\仿真结果\\Bvector.avi")))
{
fprintf(stderr,"Cannotopenvideofile%s\n","F:
\\仿真结果\\Bvector.avi");
return-2;
}
while(pFrame=cvQueryFrame(pCapture))
{
nFrmNum++;
if(nFrmNum>500)
{
nFrmNum=0;
break;
}
}
printf("视频帧数:
%d\n",nFrmNum);
cvSetCaptureProperty(
pCapture,
CV_CAP_PROP_POS_FRAMES,
0
);
//第一帧
pFrame=cvQueryFrame(pCapture);
if(nFrmNum>2)
{
pFrImg1=cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
pFrImg2=cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
pFrImg3=cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
result1=cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
result2=cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
result=cvCreateImage(cvGetSize(pFrame),IPL_DEPTH_8U,1);
pFrMat1=cvCreateMat(pFrame->height,pFrame->width,CV_8UC1);
pFrMat2=cvCreateMat(pFrame->height,pFrame->width,CV_8UC1);
pFrMat3=cvCreateMat(pFrame->height,pFrame->width,CV_8UC1);
reMat1=cvCreateMat(pFrame->height,pFrame->width,CV_8UC1);
reMat2=cvCreateMat(pFrame->height,pFrame->width,CV_8UC1);
reMat=cvCreateMat(pFrame->height,pFrame->width,CV_8UC1);
size=cvSize(pFrame->width,pFrame->height);
tempImg1=cvCreateImage(cvSize((size.width&-2)/2,(size.height&-2)/2),8,1);
}
else
{
printf("帧数小于3!
\n");
return-1;
}
//先读取两帧
cvCvtColor(pFrame,pFrImg1,CV_BGR2GRAY);
cvConvert(pFrImg1,pFrMat1);
pFrame=cvQueryFrame(pCapture);
cvCvtColor(pFrame,pFrImg2,CV_BGR2GRAY);
cvConvert(pFrImg2,pFrMat2);
intcount=2;
while((pFrame=cvQueryFrame(pCapture)))
{
count++;
cvCvtColor(pFrame,pFrImg3,CV_BGR2GRAY);
cvConvert(pFrImg3,pFrMat3);
cvSmooth(pFrMat1,pFrMat1,CV_GAUSSIAN,3,1);
cvSmooth(pFrMat2,pFrMat2,CV_GAUSSIAN,3,1);
cvSmooth(pFrMat3,pFrMat3,CV_GAUSSIAN,3,1);
//当前帧跟前一帧相减
cvAbsDiff(pFrMat2,pFrMat1,reMat1);
cvAbsDiff(pFrMat3,pFrMat2,reMat2);
//二值化前景图
cvThreshold(reMat1,result1,60.0,255.0,CV_THRESH_BINARY);
cvThreshold(reMat2,result2,60.0,255.0,CV_THRESH_BINARY);
///cvAdaptiveThreshold(reMat1,result1,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,5,5);
//cvAdaptiveThreshold(reMat2,result2,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,5,5);
//两个帧差值相与
cvAnd(result1,result2,result,0);
//使用Gaussian金字塔分解对输入图像向下采样,输出图像的高度和宽度应是输入图像的一半
cvPyrDown(result,tempImg1,7);
//对输入图像进行膨胀
cvDilate(result,result,0,1);
//使用Gaussian金字塔分解对输入图像向上采样,输出图像的高度和宽度应是输入图像的2倍
cvPyrUp(tempImg1,result,7);
//cvErode(result,result,0,1);
//cvDilate(result,result,0,1);
//cvPyrDown(result,tempImg1,7);
//cvPyrUp(tempImg1,result,7);
cvErode(result,result,0,3);
cvDilate(result,result,0,3);
//翻转图像,使其正立显示
cvFlip(result,NULL,0);
cvShowImage("video",pFrame);
cvShowImage("effect",result);
cvCopy(pFrMat2,pFrMat1,NULL);
cvCopy(pFrMat3,pFrMat2,NULL);
if(cvWaitKey(20)>=0)
break;
printf("\nFrame=%d\n",count);
}
//销毁窗口
cvDestroyWindow("video");
//cvDestroyWindow(reWin);
cvDestroyWindow("effect");
//释放图像和矩阵
cvReleaseImage(&pFrImg1);
cvReleaseImage(&pFrImg2);
cvReleaseImage(&result1);
cvReleaseImage(&result2);
cvReleaseImage(&result);
cvReleaseImage(&tempImg1);
cvReleaseMat(&pFrMat1);
cvReleaseMat(&pFrMat2);
cvReleaseMat(&reMat1);
cvReleaseMat(&reMat2);
cvReleaseMat(&reMat);
cvReleaseCapture(&pCapture);
return0;
}