机器学习算法:SVM(支持向量机)
SVM算法(Support Vector Machine,支持向量机)的核心思想有2点:1、如果数据线性可分,那么基于最大间隔的方式来确定超平面,以确保全局最优,使得分类器尽可能健壮;2、如果数据线性不可分,通过核函数将低维样本转化为高维样本使其线性可分。注意和AdaBoost类似,SVM只能解决二分类问题。
SVM的算法在数学上实在是太复杂了,没研究明白。建议还是直接使用现成的第三方组件吧,比如libsvm的C#版本,推荐这个:http://www.matthewajohnson.org/software/svm.html。
虽然没研究明白,不过这几天照着Python版本的代码试着用C#改写了一下,算是研究SVM过程中唯一的收获吧。此版本基于SMO(序列最小优化)算法求解,核函数使用的是比较常用的径向基函数(RBF)。别问我为什么没有注释,我只是从Python移植过来的,我也没看懂,等我看懂了再来补注释吧。
using System;
using System.Collections.Generic;
using System.Linq;

namespace MachineLearning
{
    /// <summary>
    /// Support Vector Machine trained with the SMO (Sequential Minimal
    /// Optimization) algorithm, using an RBF (radial basis function) kernel.
    /// Binary classifier: labels must be +1 / -1.
    /// </summary>
    public class SVM
    {
        private Random m_Rand;
        // Precomputed kernel matrix: m_Kernel[i][j] = K(x_i, x_j).
        private double[][] m_Kernel;
        // Lagrange multipliers, one per training sample.
        private double[] m_Alpha;
        // Soft-margin penalty parameter C.
        private double m_C = 1.0;
        // Bias term b of the decision function.
        private double m_B = 0.0;
        // KKT tolerance used when deciding whether a sample violates the conditions.
        private double m_Toler = 0.0;
        // Error cache: m_Cache[k][0] is a validity flag, m_Cache[k][1] is E(k).
        private double[][] m_Cache;
        private double[][] m_Data;
        // RBF kernel width (sigma).
        private double m_Reach;
        private int[] m_Label;
        private int m_Count;
        private int m_Dimension;

        public SVM()
        {
            m_Rand = new Random();
        }

        /// <summary>
        /// Trains the classifier with SMO.
        /// </summary>
        /// <param name="trainingSet">Training samples; labels must be +1 or -1.</param>
        /// <param name="C">Soft-margin penalty parameter.</param>
        /// <param name="toler">KKT-violation tolerance.</param>
        /// <param name="reach">RBF kernel width (sigma).</param>
        /// <param name="iterateCount">Maximum number of outer-loop iterations.</param>
        public void Train(List<DataVector<double, int>> trainingSet, double C, double toler, double reach, int iterateCount = 10)
        {
            // Initialization.
            m_Count = trainingSet.Count;
            m_Dimension = trainingSet[0].Dimension;
            m_Toler = toler;
            m_C = C;
            m_Reach = reach;
            this.Init(trainingSet);
            this.InitKernel();

            // Classic SMO outer loop: alternate between a full pass over all
            // samples and passes over the non-bound samples (0 < alpha < C),
            // until no alpha changes or the iteration budget is exhausted.
            int iter = 0;
            int alphaChanged = 0;
            bool entireSet = true;
            while (iter < iterateCount && (alphaChanged > 0 || entireSet))
            {
                alphaChanged = 0;
                if (entireSet)
                {
                    for (int i = 0; i < m_Count; ++i)
                        alphaChanged += InnerL(i);
                    iter++;
                }
                else
                {
                    for (int i = 0; i < m_Count; ++i)
                    {
                        if (m_Alpha[i] > 0 && m_Alpha[i] < m_C)
                            alphaChanged += InnerL(i);
                    }
                    iter += 1;
                }

                // After a full pass, switch to non-bound passes; switch back
                // to a full pass when a non-bound pass changed nothing.
                if (entireSet)
                    entireSet = false;
                else if (alphaChanged == 0)
                    entireSet = true;
            }
        }

        /// <summary>
        /// Classifies a sample.
        /// </summary>
        /// <param name="vector">Sample to classify.</param>
        /// <returns>+1 or -1 (0 only when the decision value is exactly zero).</returns>
        public int Classify(DataVector<double, int> vector)
        {
            double predict = 0.0;

            // Only support vectors (alpha > 0) contribute to the decision function.
            int svCnt = m_Alpha.Count(a => a > 0);
            var supportVectors = new double[svCnt][];
            var supportLabels = new int[svCnt];
            var supportAlphas = new double[svCnt];
            int index = 0;
            for (int i = 0; i < m_Count; ++i)
            {
                if (m_Alpha[i] > 0)
                {
                    supportVectors[index] = m_Data[i];
                    supportLabels[index] = m_Label[i];
                    supportAlphas[index] = m_Alpha[i];
                    index++;
                }
            }

            // f(x) = sum_i alpha_i * y_i * K(x_i, x) + b
            var kernelEval = KernelTrans(supportVectors, vector.Data);
            for (int i = 0; i < svCnt; ++i)
                predict += kernelEval[i] * supportAlphas[i] * supportLabels[i];
            predict += m_B;
            return Math.Sign(predict);
        }

        /// <summary>
        /// Copies the training set into internal arrays and resets the
        /// alphas and the error cache.
        /// </summary>
        /// <param name="trainingSet">Training samples.</param>
        private void Init(List<DataVector<double, int>> trainingSet)
        {
            m_Data = new double[m_Count][];
            m_Label = new int[m_Count];
            m_Alpha = new double[m_Count];
            m_Cache = new double[m_Count][];
            for (int i = 0; i < m_Count; ++i)
            {
                m_Label[i] = trainingSet[i].Label;
                m_Alpha[i] = 0.0;
                m_Cache[i] = new double[2];
                m_Cache[i][0] = 0.0;
                m_Cache[i][1] = 0.0;
                m_Data[i] = new double[m_Dimension];
                for (int j = 0; j < m_Dimension; ++j)
                    m_Data[i][j] = trainingSet[i].Data[j];
            }
        }

        /// <summary>
        /// Precomputes the full RBF kernel matrix over the training set.
        /// </summary>
        private void InitKernel()
        {
            m_Kernel = new double[m_Count][];
            for (int i = 0; i < m_Count; ++i)
            {
                m_Kernel[i] = new double[m_Count];
                var kernels = KernelTrans(m_Data, m_Data[i]);
                for (int k = 0; k < kernels.Length; ++k)
                    m_Kernel[i][k] = kernels[k];
            }
        }

        /// <summary>
        /// Evaluates the RBF kernel K(X[i], A) = exp(-||X[i]-A||^2 / sigma^2)
        /// for every row of X.
        /// </summary>
        /// <param name="X">Matrix of sample vectors.</param>
        /// <param name="A">Single sample vector.</param>
        /// <returns>Kernel values, one per row of X.</returns>
        private double[] KernelTrans(double[][] X, double[] A)
        {
            var kernel = new double[X.Length];
            for (int i = 0; i < X.Length; ++i)
            {
                double delta = 0.0;
                // BUG FIX: iterate over A.Length instead of X[0].Length so an
                // empty X (e.g. Classify() with zero support vectors) does not
                // throw IndexOutOfRangeException.
                for (int k = 0; k < A.Length; ++k)
                    delta += Math.Pow(X[i][k] - A[k], 2);
                kernel[i] = Math.Exp(delta * -1.0 / Math.Pow(m_Reach, 2));
            }
            return kernel;
        }

        /// <summary>
        /// Prediction error for training sample k: f(x_k) - y_k.
        /// </summary>
        private double E(int k)
        {
            double x = 0.0;
            for (int i = 0; i < m_Count; ++i)
                x += m_Alpha[i] * m_Label[i] * m_Kernel[i][k];
            x += m_B;
            return x - m_Label[k];
        }

        /// <summary>
        /// Recomputes E(k) and stores it in the error cache, marking it valid.
        /// </summary>
        private void UpdateE(int k)
        {
            double Ek = E(k);
            m_Cache[k][0] = 1.0;
            m_Cache[k][1] = Ek;
        }

        /// <summary>
        /// SMO inner loop: given a first multiplier index i that violates the
        /// KKT conditions, picks a second index j and jointly optimizes the
        /// pair (alpha_i, alpha_j), then updates the bias b.
        /// </summary>
        /// <param name="i">Index of the first multiplier.</param>
        /// <returns>1 if a pair of alphas was changed, otherwise 0.</returns>
        private int InnerL(int i)
        {
            double Ei = E(i);

            // Proceed only if sample i violates KKT beyond the tolerance.
            if ((m_Label[i] * Ei < -m_Toler && m_Alpha[i] < m_C) || (m_Label[i] * Ei > m_Toler && m_Alpha[i] > 0))
            {
                double Ej = 0.0;
                int j = SelectJ(i, Ei, out Ej);
                double oldAi = m_Alpha[i];
                double oldAj = m_Alpha[j];

                // Clip bounds [L, H] keeping alpha_i*y_i + alpha_j*y_j constant.
                double H, L;
                if (m_Label[i] != m_Label[j])
                {
                    L = Math.Max(0, m_Alpha[j] - m_Alpha[i]);
                    H = Math.Min(m_C, m_C + m_Alpha[j] - m_Alpha[i]);
                }
                else
                {
                    L = Math.Max(0, m_Alpha[j] + m_Alpha[i] - m_C);
                    H = Math.Min(m_C, m_Alpha[j] + m_Alpha[i]);
                }
                if (L == H)
                    return 0;

                // eta is the (negated) second derivative of the objective along
                // the constraint line; must be negative for a maximum.
                double eta = 2.0 * m_Kernel[i][j] - m_Kernel[i][i] - m_Kernel[j][j];
                if (eta >= 0)
                    return 0;

                m_Alpha[j] -= m_Label[j] * (Ei - Ej) / eta;
                m_Alpha[j] = ClipAlpha(m_Alpha[j], H, L);
                UpdateE(j);

                // Too small a step: treat as no progress.
                if (Math.Abs(m_Alpha[j] - oldAj) < 0.00001)
                    return 0;

                // Update alpha_i by the same amount in the opposite direction.
                m_Alpha[i] += m_Label[j] * m_Label[i] * (oldAj - m_Alpha[j]);
                UpdateE(i);

                // Recompute the bias from whichever alpha is non-bound.
                double b1 = m_B - Ei - m_Label[i] * (m_Alpha[i] - oldAi) * m_Kernel[i][i] - m_Label[j] * (m_Alpha[j] - oldAj) * m_Kernel[i][j];
                double b2 = m_B - Ej - m_Label[i] * (m_Alpha[i] - oldAi) * m_Kernel[i][j] - m_Label[j] * (m_Alpha[j] - oldAj) * m_Kernel[j][j];
                if (m_Alpha[i] > 0 && m_Alpha[i] < m_C)
                    m_B = b1;
                else if (m_Alpha[j] > 0 && m_Alpha[j] < m_C)
                    m_B = b2;
                else
                    m_B = (b1 + b2) / 2.0;

                return 1;
            }
            return 0;
        }

        /// <summary>
        /// Heuristic choice of the second multiplier: among samples with a
        /// valid error-cache entry, pick the one maximizing |Ei - Ek|;
        /// fall back to a random index when the cache is empty.
        /// </summary>
        /// <param name="i">Index of the first multiplier.</param>
        /// <param name="Ei">Error of the first multiplier.</param>
        /// <param name="Ej">Receives the error of the chosen multiplier.</param>
        /// <returns>Index of the second multiplier.</returns>
        private int SelectJ(int i, double Ei, out double Ej)
        {
            Ej = 0.0;
            int j = 0;
            int maxK = -1;
            double maxDeltaE = 0.0;
            m_Cache[i][0] = 1;
            m_Cache[i][1] = Ei;
            for (int k = 0; k < m_Count; ++k)
            {
                if (k == i || m_Cache[k][0] == 0)
                    continue;
                double Ek = E(k);
                double deltaE = Math.Abs(Ei - Ek);
                if (deltaE > maxDeltaE)
                {
                    maxK = k;
                    maxDeltaE = deltaE;
                    Ej = Ek;
                }
            }
            if (maxK >= 0)
            {
                j = maxK;
            }
            else
            {
                j = RandomSelect(i);
            }
            return j;
        }

        /// <summary>
        /// Picks a random index in [0, m_Count) different from i.
        /// </summary>
        private int RandomSelect(int i)
        {
            int j = 0;
            do
            {
                j = m_Rand.Next(0, m_Count);
            } while (j == i);
            return j;
        }

        /// <summary>
        /// Clips alpha into the interval [L, H].
        /// </summary>
        private double ClipAlpha(double alpha, double H, double L)
        {
            return alpha > H ? H : (alpha < L ? L : alpha);
        }
    }
}
最后上测试,还是使用上次的breast-cancer-wisconsin.txt做测试,之前用kNN和AdaBoost测试的错误率分别是2.02%和1.01%,这回用SVM对比一下。上测试代码:
/// <summary>
/// Tests the SVM on breast-cancer-wisconsin.txt: the first 600 rows train,
/// the remaining 99 rows test; prints the error rate.
/// </summary>
public void TestSvm()
{
    var trainingSet = new List<DataVector<double, int>>();
    var testSet = new List<DataVector<double, int>>();

    // Read the data. Wrapped in "using" so the reader is disposed even if
    // parsing throws (the original leaked the stream in that case).
    using (var file = new StreamReader("breast-cancer-wisconsin.txt", Encoding.Default))
    {
        for (int i = 0; i < 699; ++i)
        {
            string line = file.ReadLine();
            var parts = line.Split(',');
            var p = new DataVector<double, int>(9);
            for (int j = 0; j < p.Dimension; ++j)
            {
                // Missing values ("?") are treated as 0.
                if (parts[j + 1] == "?")
                    parts[j + 1] = "0";
                p.Data[j] = Convert.ToDouble(parts[j + 1]);
            }
            // Class 2 (benign) -> +1, otherwise -> -1 (SVM needs +1/-1 labels).
            p.Label = Convert.ToInt32(parts[10]) == 2 ? 1 : -1;

            // As before: 600 samples for training, 99 for testing.
            if (i < 600)
                trainingSet.Add(p);
            else
                testSet.Add(p);
        }
    }

    // Train and evaluate.
    var svm = new SVM();
    svm.Train(trainingSet, 1, 0.01, 3.0, 10);
    int error = 0;
    foreach (var p in testSet)
    {
        // BUG FIX: the original called boost.Classify(p) — "boost" was an
        // undefined leftover from the AdaBoost test; it must be svm.
        var label = svm.Classify(p);
        if (label != p.Label)
            error++;
    }
    Console.WriteLine("Error={0}/{1},{2}%", error, testSet.Count, (error * 100.0 / testSet.Count));
}
最终结果是99个测试样本猜错1个,错误率1.01%,和AdaBoost相当。
Train时使用不同的参数,错误率会变化,很可惜的是参数的选择往往没有固定的方法,需要在一定范围内尝试以得到最小错误率。
另外对于为什么核函数可以处理线性不可分数据,网上有2张图很能说明问题,转载一下:
以下数据明显是线性不可分的,在二维空间下找不到一条直线能将数据分开:
但在二维空间下的线性不可分,到了三维空间,是可以找到一个平面来分隔数据的:
声明:本站所有文章资源内容,如无特殊说明或标注,均为采集网络资源。如若本站内容侵犯了原著者的合法权益,可联系本站删除。