#include "ranksvmtn.h" #include #include #include"../tools/matrixIO.h" using namespace std; using namespace Eigen; const double C=1e-4; // Compensating & scaling // Main terminating criteria const int maxiter = 20; // max iteration count const double prec=1e-10; // precision // conjugate gradient const double cg_prec=1e-10; // precision const int cg_maxiter = 30; // line search const double line_prec=1e-10; // precision const double line_turb=1e-15; // purturbation int cal_Hs(const MatrixXd &D,const vector &rank,const VectorXd &corr,const VectorXd &alpha,const vector &A1,const vector &A2,const VectorXd s,VectorXd &Hs) { Hs = VectorXd::Zero(s.rows()); VectorXd Ds=D*s; VectorXd gamma(D.rows()); for (int i=0;i=A1[i];--j) if (corr[rank[j]]>0) gamma[rank[j]]=g; else g+=Ds[rank[j]]; } Hs = s + C*(D.transpose()*(alpha.cwiseProduct(Ds) - gamma)); return 0; } int cg_solve(const MatrixXd &D,const vector &rank,const VectorXd &corr,const VectorXd &alph,const vector &A1,const vector &A2,const VectorXd &b, VectorXd &x) { double alpha,beta,r_1,r_2; int iter=0; VectorXd q; VectorXd Hs; cal_Hs(D,rank,corr,alph,A1,A2,x,Hs); VectorXd res = b - Hs; VectorXd p = res; while (1) { // Non preconditioned version r_1 = res.dot(res); if (r_1 cg_maxiter) { LOG(INFO) << "CG forced termination by maxiter, r:"< &rank,VectorXd &ref) { int i=l,j=r,k; double mid=ref(rank[(l+r)>>1]); while (i<=j) { while (ref[rank[i]]mid) --j; if (i<=j) { k=rank[i]; rank[i]=rank[j]; rank[j]=k; ++i; --j; } } if (j>l) ranksort(l,j,rank,ref); if (i &A1,const vector &A2,vector &rank,VectorXd &yt,VectorXd &alpha,VectorXd &beta) { long n = dw.rows(); yt = dw - corr; alpha=VectorXd::Zero(n); beta=VectorXd::Zero(n); for (int i=0;i=A1[i];--j) if (corr[rank[j]]>0) { alpha[rank[j]]=a; beta[rank[j]]=b; } else { a+=1; b+=yt[rank[j]]; } } } // line search using newton method int line_search(const VectorXd &w,const MatrixXd &D,const VectorXd &corr,const vector &A1,const vector &A2,const VectorXd &step,double &t) { VectorXd Dd = D*step; VectorXd Xd = VectorXd::Zero(A1.size()); VectorXd alpha,beta,yt; VectorXd grad; VectorXd Hs; vector rank(D.rows()); int iter = 0; for (int i=0;i cg_maxiter) { LOG(INFO) << "line search forced termination by maxiter, prec:"< &A1,const vector &A2,const VectorXd &corr,VectorXd &weight){ int iter = 0; long n=D.rows(); LOG(INFO) << "training with feature size:" << fsize << " Data size:" << n << " Query size:" << A1.size(); VectorXd grad(fsize); VectorXd step(fsize); vector rank(n); double obj,t; VectorXd dw = D*weight; VectorXd yt; VectorXd alpha,beta; while (true) { iter+=1; if (iter> maxiter) { LOG(INFO)<< "Maxiter reached"; break; } dw = D*weight; cal_alpha_beta(dw,corr,A1,A2,rank,yt,alpha,beta); // Generate support vector matrix sv & gradient obj = (weight.dot(weight) + C*(alpha.dot(yt.cwiseProduct(yt))-beta.dot(yt)))/2;// grad = weight + C*(D.transpose()*(alpha.cwiseProduct(yt)-beta)); step = grad*0; // Solve cg_solve(D,rank,corr,alpha,A1,A2,grad,step); // do line search line_search(weight,D,corr,A1,A2,step,t); weight=weight+step*t; // When dec is small enough LOG(INFO)<<"Iter: "< A1,A2; int i,j; LOG(INFO)<<"Processing input"; vector &dat = D.getData(); for (i=0;irank>0?0.5:-0.5; for (j = 0; j < D.getfSize(); ++j){ Data(i, j) = dat[i]->feature(j);} } i=j=0; while (iqid!=dat[i+1]->qid) { A1.push_back(j); A2.push_back(i); j = i+1; } ++i; } train_orig(fsize,Data,A1,A2,corr,model.weight); return 0; }; int RSVMTN::predict(DataList &D, vector &res){ res.clear(); for (int i=0;ifeature).dot(model.weight)); return 0; };