% SMO Algorithm Pseudocode


% PLATT, J. C. (1998).
% Fast training of support vector machines using sequential minimal
% optimization. In Schölkopf, B., Burges, C., and Smola, A. J., editors,
% Advances in Kernel Methods: Support Vector Learning, chapter 12,
% pages 185-208. MIT Press, Cambridge, Massachusetts.
%
% History: May 15/2001 - v1.00
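%
% Usage sketch (illustrative only): the function declaration line is not
% part of this excerpt, so "smotrain" below is a placeholder name for the
% trainer this file defines; the argument order (X, Y, C, kernel, ...) is
% inferred from the argument checks that follow.
%
%   X = [randn(20,2)+1; randn(20,2)-1];   % two separable clusters
%   Y = [ones(20,1); -ones(20,1)];        % labels in {-1,+1}
%   net = smotrain(X, Y, 10, linear);     % C = 10, linear kernel object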

if size(Y,2) ~= 1 | ~isreal(Y)
    error('y must be a real double precision column vector');
end

n = size(Y,1);
if n ~= size(X,1)
    error('x and y must have the same number of rows');
end

if (nargin < 3 | nargin > 7)   % check correct number of arguments
    help svc
    return;
end;

if nargin == 4 & isa(C,'svc')
    net = C;
    C = get(net,'C');
    kernel = get(net,'kernel');
else
    if nargin < 4, C = Inf;         end;
    if nargin < 5, kernel = linear; end;
end;

NOBIAS = 0;
switch nargin
    case 5
        if ischar(kernel) & strcmp(kernel,'nobias')
            NOBIAS = 1;
        end;
    case 6
        if ischar(alpha_init) & strcmp(alpha_init,'nobias')
            NOBIAS = 1;
        end;
    case 7
        if ischar(bias_init) & strcmp(bias_init,'nobias')
            NOBIAS = 1;
        end;
end;

if nargin == 7
    if n ~= size(alpha_init,1)
        error('alpha must be a real double precision column vector with the same size as y');
    end
    if any(alpha_init < 0)
        error('Negative alphas are not allowed');
    end
else
    alpha_init = zeros(n,1);   % initialize the weights to zeros
    bias_init = 0;             % initialize the threshold to zero
end;

fprintf('\n\nSequential Minimal Optimization: SVMs for Classification\n');
fprintf('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n');

tic;

if NOBIAS
    SMO = SMOTutorNOBIAS(X,Y,C,kernel,alpha_init,bias_init);
else
    SMO = SMOTutor(X,Y,C,kernel,alpha_init,bias_init);
end;
fprintf('Execution time: %4.1f seconds\n', toc);

sv = X;
w = (SMO.alpha.*Y)';   % weight vector
net = svc(kernel, sv, w, SMO.bias, C);

fprintf('Epochs: %d\n', SMO.epochs);
w0_2 = w*SMO.Kcache*w';
fprintf('|w0|^2: %f\n', w0_2);
fprintf('Margin: %f\n', 1/sqrt(w0_2));
NUMSV = nonZeroLagrangeMultipliers;
fprintf('Support Vectors: %d (%3.1f%%)\n\n', NUMSV, 100*NUMSV/n);
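% Note: with w = (SMO.alpha.*Y)', the squared norm of the separating
% hyperplane normal in feature space is
%   |w0|^2 = sum_ij alpha_i*alpha_j*y_i*y_j*K(x_i,x_j) = w*Kcache*w',
% so the geometric margin printed above is 1/sqrt(w0_2) = 1/|w0|.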

function RESULT = SMOTutor(x,y,C,kernel,alpha_init,bias_init)
% Implementation of the Sequential Minimal Optimization (SMO)
% training algorithm for Vapnik's Support Vector Machine (SVM)

global SMO;

[ntp,d] = size(x);

% Initialize the variables
SMO.epsilon = svtol(C);
SMO.tolerance = KKTtol;
SMO.x = x;
SMO.y = y;
SMO.C = C;
SMO.kernel = kernel;
SMO.alpha = alpha_init;
SMO.bias = bias_init;
SMO.ntp = ntp;   % number of training points

% CACHES:
SMO.Kcache = evaluate(kernel,x,x);   % kernel evaluations
SMO.error = zeros(SMO.ntp,1);        % error
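% Note on the caches: Kcache holds the full ntp-by-ntp Gram matrix, so
% memory grows as O(ntp^2). Platt's original algorithm instead evaluates
% kernel entries on demand, precisely to avoid this cost on large sets;
% caching them is a speed/memory trade-off that suits small problems.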

if ~any(SMO.alpha)
    % Since all alpha(i) are zero, fwd(i) is also zero
    SMO.error = -y;
else
    SMO.error = fwd(1:ntp) - y;
end;

numChanged = 0;
examineAll = 1;
epoch = 0;

% When all data were examined and no changes were made, the loop reaches
% its end. Otherwise, passes over all data and over the likely support
% vectors are alternated until all support vectors have been found.
while (numChanged > 0) | examineAll
    numChanged = 0;
    if examineAll
        % Loop over all points
        for i = 1:ntp
            numChanged = numChanged + examineExample(i);
        end;
    else
        % Loop over KKT points
        for i = 1:ntp
            % Only the points that violate the KKT conditions
            if (SMO.alpha(i) > SMO.epsilon) & (SMO.alpha(i) < (SMO.C-SMO.epsilon))
                numChanged = numChanged + examineExample(i);
            end;
        end;
    end;

    if (examineAll == 1)
        examineAll = 0;
    elseif (numChanged == 0)
        examineAll = 1;
    end;

    epoch = epoch + 1;

    % trerror = 1;   % 100*sum((error)<0)/ntp;
    % fprintf('Epoch: %d, TR Error: %g%%, numChanged: %d, alpha>0: %d, 0<alpha<C: %d\n', ...
    %     epoch, ...
    %     trerror, ...
    %     numChanged, ...
    %     nonZeroLagrangeMultipliers, ...
    %     nonBoundLagrangeMultipliers);

    % WRITE results to disk: W, B, ERROR
end;

SMO.epochs = epoch;
RESULT = SMO;

function RESULT = nonZeroLagrangeMultipliers;
global SMO;
RESULT = sum(SMO.alpha > SMO.epsilon);

function RESULT = nonBoundLagrangeMultipliers;
global SMO;
RESULT = sum((SMO.alpha > SMO.epsilon) & (SMO.alpha < (SMO.C-SMO.epsilon)));

function RESULT = fwd(n)
global SMO;
LN = length(n);
RESULT = -SMO.bias + sum(repmat(SMO.y,1,LN) .* repmat(SMO.alpha,1,LN) .* SMO.Kcache(:,n))';
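% fwd(n) evaluates the decision function at the training points indexed by
% n, following Platt's convention f(x) = sum_i alpha_i*y_i*K(x_i,x) - b
% (hence the leading -SMO.bias). The sum over the columns of the cached
% kernel matrix vectorizes the evaluation over all requested indexes.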

function RESULT = examineExample(i2)
% First heuristic selects i2 and asks examineExample to find a
% second point (i1) in order to do an optimization step with two
% Lagrange multipliers

global SMO;

alpha2 = SMO.alpha(i2);
y2 = SMO.y(i2);

if ((alpha2 > SMO.epsilon) & (alpha2 < (SMO.C-SMO.epsilon)))
    e2 = SMO.error(i2);
else
    e2 = fwd(i2) - y2;
end;

% r2 < 0 if point i2 is placed between the margins (-1)-(+1);
% otherwise r2 > 0. r2 = f2*y2 - 1
r2 = e2*y2;
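% Why r2 = e2*y2 equals f2*y2 - 1: with e2 = f(x2) - y2 and y2 in {-1,+1},
% e2*y2 = f(x2)*y2 - y2^2 = f(x2)*y2 - 1, the signed violation of the
% margin constraint y2*f(x2) >= 1.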

% KKT conditions:
%   r2 >  0 and alpha2 == 0     (well classified)
%   r2 == 0 and 0 < alpha2 < C  (support vectors at margins)
%   r2 <  0 and alpha2 == C     (support vectors between margins)
%
% Test the KKT conditions for the current i2 point.
% If a point is well classified its alpha must be 0, or, if it is out of
% its margin, its alpha must be C. If it is at the margin, its alpha must
% satisfy 0 < alpha < C.

% take action only if i2 violates the Karush-Kuhn-Tucker conditions
if ((r2 < -SMO.tolerance) & (alpha2 < (SMO.C-SMO.epsilon))) | ...
   ((r2 >  SMO.tolerance) & (alpha2 > SMO.epsilon))
    % If it doesn't violate the KKT conditions then exit, otherwise continue.

    % Try i2 by three ways; if successful, then immediately return 1;
    RESULT = 1;

    % First the routine tries to find an i1 Lagrange multiplier that
    % maximizes the measure |E1-E2|. The larger this value is, the bigger
    % the step the dual objective function can take.
    % In this first test, only support vectors will be tested.
    POS = find((SMO.alpha > SMO.epsilon) & (SMO.alpha < (SMO.C-SMO.epsilon)));
    [MAX,i1] = max(abs(e2 - SMO.error(POS)));
    if ~isempty(i1)
        i1 = POS(i1);   % map the index back into the full data set
        if takeStep(i1, i2, e2), return; end;
    end;
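    % The |E1-E2| heuristic follows from the update rule used inside
    % takeStep below,
    %   a2_new = alpha2 - y2*(e1-e2)/eta,
    % so for a given curvature eta the step size in alpha2 (and hence the
    % expected progress on the dual objective) grows with |E1-E2|.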

    % The second heuristic chooses any Lagrange multiplier that is a SV and tries to optimize
    for i1 = randperm(SMO.ntp)
        if (SMO.alpha(i1) > SMO.epsilon) & (SMO.alpha(i1) < (SMO.C-SMO.epsilon))
            % if a good i1 is found, optimise
            if takeStep(i1, i2, e2), return; end;
        end;
    end;

    % if both heuristics above fail, iterate over all the data set
    for i1 = randperm(SMO.ntp)
        if ~((SMO.alpha(i1) > SMO.epsilon) & (SMO.alpha(i1) < (SMO.C-SMO.epsilon)))
            if takeStep(i1, i2, e2), return; end;
        end;
    end;
end;

% no progress possible
RESULT = 0;

function RESULT = takeStep(i1, i2, e2)
% for a pair of alpha indexes, verify if it is possible to execute
% the optimisation step described by Platt.

global SMO;

RESULT = 0;
if (i1 == i2), return; end;

% compute upper and lower constraints, L and H, on multiplier a2
alpha1 = SMO.alpha(i1);
alpha2 = SMO.alpha(i2);
x1 = SMO.x(i1);
x2 = SMO.x(i2);
y1 = SMO.y(i1);
y2 = SMO.y(i2);
C = SMO.C;
K = SMO.Kcache;

s = y1*y2;

if (y1 ~= y2)
    L = max(0, alpha2-alpha1);
    H = min(C, alpha2-alpha1+C);
else
    L = max(0, alpha1+alpha2-C);
    H = min(C, alpha1+alpha2);
end;

if (L == H), return; end;
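% Where L and H come from: the equality constraint y1*a1 + y2*a2 = const
% confines the pair to a line inside the box [0,C]x[0,C]. If y1 ~= y2 the
% line keeps a2 - a1 constant; if y1 == y2 it keeps a1 + a2 constant.
% Clipping that line against the box yields the feasible segment [L,H]
% for a2; if the segment degenerates to a point, no step is possible.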

if (alpha1 > SMO.epsilon) & (alpha1 < (C-SMO.epsilon))
    e1 = SMO.error(i1);
else
    e1 = fwd(i1) - y1;
end;

% if (alpha2 > SMO.epsilon) & (alpha2 < (C-SMO.epsilon))
%     e2 = SMO.error(i2);
% else
%     e2 = fwd(i2) - y2;
% end;

% compute eta
k11 = K(i1,i1);
k12 = K(i1,i2);
k22 = K(i2,i2);
eta = 2.0*k12 - k11 - k22;
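% eta is the second derivative of the dual objective along the constraint
% line (the negative of the eta in Platt's paper):
%   eta = 2*k12 - k11 - k22 = -||phi(x1) - phi(x2)||^2 <= 0
% for any positive semidefinite kernel. eta < 0 means the objective is
% strictly concave in a2, so the unconstrained maximizer below is valid.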

% recompute the Lagrange multiplier for pattern i2
if (eta < 0.0)
    a2 = alpha2 - y2*(e1-e2)/eta;

    % constrain a2 to lie between L and H
    if (a2 < L)
        a2 = L;
    elseif (a2 > H)
        a2 = H;
    end;

else
    % When eta is not negative, the objective function W should be
    % evaluated at each end of the line segment. Only those terms in the
    % objective function that depend on alpha2 need be evaluated...
    ind = find(SMO.alpha > 0);

    aa2 = L;
    aa1 = alpha1 + s*(alpha2 - aa2);
    Lobj = aa1 + aa2 + sum((-y1*aa1/2).*SMO.y(ind).*K(ind,i1) + (-y2*aa2/2).*SMO.y(ind).*K(ind,i2));

    aa2 = H;
    aa1 = alpha1 + s*(alpha2 - aa2);
    Hobj = aa1 + aa2 + sum((-y1*aa1/2).*SMO.y(ind).*K(ind,i1) + (-y2*aa2/2).*SMO.y(ind).*K(ind,i2));

    if (Lobj > Hobj + SMO.epsilon)
        a2 = L;
    elseif (Lobj < Hobj - SMO.epsilon)
        a2 = H;
    else
        a2 = alpha2;
    end;
end;
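% Note: only the terms of the dual objective that involve a1 and a2 need
% to be compared above, since everything else takes the same value at both
% endpoints; a1 is eliminated through a1 = alpha1 + s*(alpha2 - a2), so
% each candidate endpoint for a2 fixes the whole pair.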

if (abs(a2 - alpha2) < SMO.epsilon*(a2 + alpha2 + SMO.epsilon))
    return;
end;

% recompute the Lagrange multiplier for pattern i1
a1 = alpha1 + s*(alpha2 - a2);

w1 = y1*(a1 - alpha1);
w2 = y2*(a2 - alpha2);

% update the threshold to reflect the change in the Lagrange multipliers
b1 = SMO.bias + e1 + w1*k11 + w2*k12;
bold = SMO.bias;

if (a1 > SMO.epsilon) & (a1 < (C-SMO.epsilon))
    SMO.bias = b1;
else
    b2 = SMO.bias + e2 + w1*k12 + w2*k22;
    if (a2 > SMO.epsilon) & (a2 < (C-SMO.epsilon))
        SMO.bias = b2;
    else
        SMO.bias = (b1 + b2)/2;
    end;
end;
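% Rationale for the threshold choice: when a1 ends up non-bound, point i1
% must sit exactly on the margin, and b1 is the bias that makes its
% post-step error zero; b2 plays the same role for a non-bound a2. When
% both multipliers are at bounds, every value between b1 and b2 satisfies
% the KKT conditions, and the midpoint is taken.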

% update the error cache using the new Lagrange multipliers
SMO.error = SMO.error + w1*K(:,i1) + w2*K(:,i2) + bold - SMO.bias;
SMO.error(i1) = 0.0;
SMO.error(i2) = 0.0;
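% The incremental error update follows from the change in the decision
% function: f_new(x) - f_old(x) = w1*K(x,x1) + w2*K(x,x2) - (b_new - bold),
% which with f(x) = sum_i alpha_i*y_i*K(x_i,x) - bias adds the vector
% w1*K(:,i1) + w2*K(:,i2) + bold - SMO.bias to every cached error at once.
% The two optimized points are then reset consistently with the new bias.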

% update the vector of Lagrange multipliers
SMO.alpha(i1) = a1;
SMO.alpha(i2) = a2;

RESULT = 1;
