author     Joe Zhao <ztuowen@gmail.com>    2015-05-31 15:10:15 +0800
committer  Joe Zhao <ztuowen@gmail.com>    2015-05-31 15:10:15 +0800
commit     f77a4937a5f09d1f49a114ada70b9e11be44b1d6 (patch)
tree       0bb1cd40fe0ab6de90463672a3ad7bd531c4edd7 /model/ranksvmtn.cpp
parent     c562985afc32a7e2a4c08dcf4947c5d920686f94 (diff)
misc
Diffstat (limited to 'model/ranksvmtn.cpp')
-rw-r--r--  model/ranksvmtn.cpp  24
1 file changed, 15 insertions(+), 9 deletions(-)
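
The substance of the patch: a file-scope constant RC replaces the direct uses of the SVM regularization parameter C inside train_orig and its helpers, and train() now sets RC = 2.0*C before training. With that scaling, the objective computed below, (w.w + RC*l)/2, is exactly the conventional 0.5*|w|^2 + C*l form of the L2-regularized squared-hinge RankSVM loss. A self-contained check of that identity (the sample values are illustrative only):

    #include <cassert>
    #include <cmath>

    int main() {
        double C = 1.5, l = 0.7, ww = 2.0; // illustrative values for C, the loss, and |w|^2
        double RC = 2.0 * C;               // the scaling this commit introduces
        // (|w|^2 + RC*l)/2 == 0.5*|w|^2 + C*l, up to rounding
        assert(std::abs((ww + RC * l) / 2 - (0.5 * ww + C * l)) < 1e-12);
        return 0;
    }
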
diff --git a/model/ranksvmtn.cpp b/model/ranksvmtn.cpp
index 4572484..15a5e10 100644
--- a/model/ranksvmtn.cpp
+++ b/model/ranksvmtn.cpp
@@ -6,6 +6,8 @@
using namespace std;
using namespace Eigen;
+double RC=0;
+
void cal_Dw(RidList &D,const VectorXd &w, VectorXd &Dw)
{
int n = D.getSize();
@@ -63,7 +65,7 @@ int cal_Hs(RidList &D,const vector<int> &rank,const VectorXd &corr,const VectorX
tmp(i) = alpha(i)*Ds(i)-gamma(i);
VectorXd res = VectorXd::Zero(D.getfSize());
cal_Dtw(D,tmp,res);
- Hs = s + C*res;
+ Hs = s + RC*res;
return 0;
}
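
For context, cal_Hs forms the truncated-Newton Hessian-vector product Hs = s + RC * D^T(alpha .* (D s) - gamma) without ever materializing the pairwise matrix. A minimal dense sketch of the same computation, assuming an explicit matrix D (the real code streams it through RidList via cal_Dw/cal_Dtw; the function name here is illustrative):

    #include <Eigen/Dense>
    using Eigen::MatrixXd;
    using Eigen::VectorXd;

    // Dense analogue of cal_Hs: Hs = s + RC * D^T (alpha .* (D s) - gamma).
    VectorXd hess_vec(const MatrixXd &D, const VectorXd &alpha,
                      const VectorXd &gamma, double RC, const VectorXd &s) {
        VectorXd Ds  = D * s;                           // cal_Dw analogue
        VectorXd tmp = alpha.cwiseProduct(Ds) - gamma;  // tmp(i) = alpha(i)*Ds(i) - gamma(i)
        return s + RC * (D.transpose() * tmp);          // cal_Dtw analogue plus identity term
    }
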
@@ -188,7 +190,7 @@ int line_search(const VectorXd &w,RidList &D,const VectorXd &corr,const VectorXd
VectorXd tmp = alpha.cwiseProduct(yt)-beta;
VectorXd res = VectorXd::Zero(D.getfSize());
cal_Dtw(D,tmp,res);
- grad = grad + C*res;
+ grad = grad + RC*res;
g = grad.dot(step);
cal_Hs(D,rank,corr,alpha,step,Hs);
h = Hs.dot(step);
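
In this hunk g = grad.dot(step) and h = Hs.dot(step) are the first and second directional derivatives of the objective along the fixed direction step, which suggests the surrounding loop takes 1-D Newton steps on the step length t (the loop structure is assumed; only these lines appear in the hunk):

    // One 1-D Newton update of the step length t along `step`:
    // t_next = t - g/h, valid while h > 0 (convex objective).
    inline double newton_1d(double t, double g, double h) { return t - g / h; }
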
@@ -216,7 +218,7 @@ int train_orig(int fsize, RidList &Data,const VectorXd &corr,VectorXd &weight){
VectorXd grad(fsize);
VectorXd step(fsize);
vector<int> rank(n);
- double obj,t;
+ double obj,t,l;
VectorXd dw(n);
VectorXd yt;
@@ -227,11 +229,12 @@ int train_orig(int fsize, RidList &Data,const VectorXd &corr,VectorXd &weight){
cal_Dw(Data,weight,dw);
cal_alpha_beta(dw,corr,Data,rank,yt,alpha,beta);
// Generate support vector matrix sv & gradient
- obj = (weight.dot(weight) + C*(alpha.dot(yt.cwiseProduct(yt))-beta.dot(yt)))/2;
+ l=alpha.dot(yt.cwiseProduct(yt))-beta.dot(yt);
+ obj = (weight.dot(weight) + RC*l)/2;
VectorXd tmp = alpha.cwiseProduct(yt)-beta;
VectorXd res = VectorXd::Zero(fsize);
cal_Dtw(Data,tmp,res);
- grad = weight + C*res;
+ grad = weight + RC*res;
// Solve
cg_solve(Data,rank,corr,alpha,grad,step);
// do line search
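
This hunk factors the loss term out as l = alpha.(yt.*yt) - beta.yt so it can be logged separately, and the gradient w + RC * D^T(alpha .* yt - beta) matches the derivative of (w.w + RC*l)/2 at the current point. A dense sketch of the pair, under the same assumption of an explicit D as above:

    #include <Eigen/Dense>
    using Eigen::MatrixXd;
    using Eigen::VectorXd;

    // Dense analogue of the objective/gradient computed in train_orig.
    void obj_grad(const MatrixXd &D, const VectorXd &w, const VectorXd &alpha,
                  const VectorXd &beta, const VectorXd &yt, double RC,
                  double &obj, VectorXd &grad) {
        double l = alpha.dot(yt.cwiseProduct(yt)) - beta.dot(yt); // logged as "l"
        obj  = (w.dot(w) + RC * l) / 2;    // = 0.5*|w|^2 + C*l once RC = 2*C
        grad = w + RC * (D.transpose() * (alpha.cwiseProduct(yt) - beta));
    }
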
@@ -240,7 +243,7 @@ int train_orig(int fsize, RidList &Data,const VectorXd &corr,VectorXd &weight){
// When dec is small enough
double nprec = step.dot(grad)/obj;
++iter;
- LOG(INFO)<<"Iter: "<<iter<<" Obj: " <<obj << " Ndec/Obj:"<<nprec << " linesearch: "<< -t ;
+ LOG(INFO)<<"Iter: "<<iter<<" Obj: " <<obj<< " l: "<< l << " Ndec/Obj:"<<nprec << " linesearch: "<< -t ;
if (iter>= maxiter)
{
LOG(INFO)<< "Maxiter reached";
@@ -254,11 +257,11 @@ int train_orig(int fsize, RidList &Data,const VectorXd &corr,VectorXd &weight){
int RSVMTN::train(RidList &D){
VectorXd corr(D.getSize());
- vector<int> A1,A2;
- int i,j;
+ int i;
LOG(INFO)<<"Processing input";
for (i=0;i<D.getSize();++i)
corr(i)=D.getL(i)>0?0.5:-0.5;
+ RC=2.0*C;
train_orig(fsize,D,corr,model.weight);
return 0;
};
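
Note that RC is a file-scope global, so train() must assign it before calling train_orig, and nothing resets it between runs. A hypothetical alternative that avoids the global (the extra parameter is an assumption, not this repo's API):

    // Hypothetical signature passing the scaled constant explicitly:
    // int train_orig(int fsize, RidList &Data, const VectorXd &corr,
    //                VectorXd &weight, double RC);
    // call site: train_orig(fsize, D, corr, model.weight, 2.0 * C);
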
@@ -267,6 +270,9 @@ int RSVMTN::predict(RidList &D, vector<double> &res){
res.clear();
int n = D.getSize();
for (int i=0;i<n;++i)
- res.push_back(D.getVecDot(i,model.weight));
+ {
+ double r=D.getVecDot(i,model.weight);
+ res.push_back(r);
+ }
return 0;
};
\ No newline at end of file
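
predict() now stores the per-item score w.x_i in a named temporary before pushing it, leaving a natural hook for any post-processing of r. A hedged end-to-end usage sketch (construction and data loading for RSVMTN/RidList are assumptions; only train and predict appear in this file):

    // Assumed usage: train, then score every item for ranking.
    RSVMTN svm;                      // constructor/configuration assumed
    svm.train(D);                    // sets RC = 2*C, runs truncated Newton
    std::vector<double> score;
    svm.predict(D, score);           // score[i] = w . x_i for item i
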