author     Joe Zhao <ztuowen@gmail.com>  2015-04-12 00:51:14 +0800
committer  Joe Zhao <ztuowen@gmail.com>  2015-04-12 00:51:14 +0800
commit     6c77acb550288883c25b3c2a769313d5466dda70 (patch)
tree       6d06d5568e48b95f9a4175c77deb2d247ffcc10a
parent     138634e6d4700fa6cc9e4e140d195be878074934 (diff)
PRSVM+, tested
-rw-r--r--  main.cpp             11
-rw-r--r--  model/ranksvmtn.cpp  38
2 files changed, 21 insertions(+), 28 deletions(-)
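
The substantive change in this commit: the cost parameter C becomes a single file-scope constant and now scales the loss term directly, instead of being threaded through cal_Hs and cg_solve as an argument. For orientation, here is a sketch of the pairwise squared-hinge (primal RankSVM) objective this code appears to minimize; the notation is assumed for this note, not taken from the repository:

    f(w) = \tfrac{1}{2}\|w\|^2 + \tfrac{C}{2} \sum_{(i,j) \in P} \max\bigl(0,\, 1 - w^\top(x_i - x_j)\bigr)^2

where P is the set of preference pairs. Grouping the active pairs into the alpha, beta and yt vectors the code uses, the gradient takes the form w + C * D^T (alpha .* yt - beta), which is the shape of the updated grad lines in the hunks below.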
diff --git a/main.cpp b/main.cpp
index e89cfe1..eeb6b99 100644
--- a/main.cpp
+++ b/main.cpp
@@ -54,7 +54,7 @@ int predict() {
rsvm->predict(D,L);
}
- LOG(INFO)<<"Training finished,saving prediction";
+ LOG(INFO)<<"Finished, saving prediction";
std::ofstream fout(vm["output"].as<std::string>().c_str());
for (int i=0; i<L.size();++i)
@@ -88,15 +88,6 @@ int main(int argc, char **argv) {
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
- /* Conjugate Gradient method test
- MatrixXd A(3,3);
- VectorXd b(3),x(3);
- A<< 1,2,3,2,2,4,3,4,1;
- b<< 1,1,1;
- x<< 0,0,0;
- cg_solve(A,b,x);
- write_stream(std::cout,x);*/
-
// Print help if necessary
if (vm.count("help") || !(vm.count("train") || vm.count("validate") || vm.count("predict"))) {
std::cout << desc;
diff --git a/model/ranksvmtn.cpp b/model/ranksvmtn.cpp
index fec7935..cd58aa9 100644
--- a/model/ranksvmtn.cpp
+++ b/model/ranksvmtn.cpp
@@ -6,14 +6,17 @@
using namespace std;
using namespace Eigen;
-const int maxiter = 10;
-const double prec=1e-4;
-const double C=1;
-const double cg_prec=1e-10;
-const double line_prec=1e-10;
-const double line_turb=1e-12;
-
-int cal_Hs(const MatrixXd &D,const vector<int> &rank,const VectorXd &corr,const VectorXd &alpha,const vector<int> &A1,const vector<int> &A2,const double &C,const VectorXd s,VectorXd &Hs)
+const double C=1e-2; // cost parameter; scales the loss term
+// Main terminating criteria
+const int maxiter = 10; // max iteration count
+const double prec=1e-4; // precision
+// conjugate gradient
+const double cg_prec=1e-10; // precision
+// line search
+const double line_prec=1e-10; // precision
+const double line_turb=1e-15; // perturbation
+
+int cal_Hs(const MatrixXd &D,const vector<int> &rank,const VectorXd &corr,const VectorXd &alpha,const vector<int> &A1,const vector<int> &A2,const VectorXd s,VectorXd &Hs)
{
Hs = VectorXd::Zero(s.rows());
VectorXd Ds=D*s;
@@ -33,17 +36,17 @@ int cal_Hs(const MatrixXd &D,const vector<int> &rank,const VectorXd &corr,const
else
g+=Ds[rank[j]];
}
- Hs = s + (D.transpose()*(alpha.cwiseProduct(Ds) - gamma));
+ Hs = s + C*(D.transpose()*(alpha.cwiseProduct(Ds) - gamma));
return 0;
}
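
cal_Hs is the matrix-free (generalized) Hessian-vector product that the CG solver calls; with the new scaling it computes, in the same assumed notation as above,

    H s = s + C \, D^\top \bigl(\alpha \circ (D s) - \gamma\bigr)

matching the Hs = s + C*(...) line in this hunk, so C no longer needs to travel through the call chain.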
-int cg_solve(const MatrixXd &D,const vector<int> &rank,const VectorXd &corr,const VectorXd &alph,const vector<int> &A1,const vector<int> &A2,const VectorXd &b,const double &C, VectorXd &x)
+int cg_solve(const MatrixXd &D,const vector<int> &rank,const VectorXd &corr,const VectorXd &alph,const vector<int> &A1,const vector<int> &A2,const VectorXd &b, VectorXd &x)
{
double alpha,beta,r_1,r_2;
int step=0;
VectorXd q;
VectorXd Hs;
- cal_Hs(D,rank,corr,alph,A1,A2,C,x,Hs);
+ cal_Hs(D,rank,corr,alph,A1,A2,x,Hs);
VectorXd res = b - Hs;
VectorXd p = res;
while (1)
@@ -56,7 +59,7 @@ int cg_solve(const MatrixXd &D,const vector<int> &rank,const VectorXd &corr,cons
beta = r_1 / r_2;
p = res + p * beta;
}
- cal_Hs(D,rank,corr,alph,A1,A2,C,p,q);
+ cal_Hs(D,rank,corr,alph,A1,A2,p,q);
alpha = r_1/p.dot(q);
x=x+p*alpha;
res=res-q*alpha;
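
cg_solve itself is a textbook matrix-free linear conjugate gradient. Below is a minimal self-contained sketch of the same scheme against Eigen; solve_cg, apply_H, and the default tolerances are illustrative names and values, not part of this repository:

#include <Eigen/Dense>
#include <functional>

using Eigen::VectorXd;

// Matrix-free conjugate gradient: solves H*x = b given only a
// callback apply_H(v) = H*v, which is the role cal_Hs plays above.
void solve_cg(const std::function<VectorXd(const VectorXd&)> &apply_H,
              const VectorXd &b, VectorXd &x,
              double tol = 1e-10, int max_steps = 1000)
{
    VectorXd res = b - apply_H(x);        // initial residual
    VectorXd p = res;                     // initial search direction
    double r_old = res.dot(res);
    for (int step = 0; step < max_steps && r_old > tol; ++step) {
        VectorXd q = apply_H(p);
        double alpha = r_old / p.dot(q);  // step length along p
        x += alpha * p;
        res -= alpha * q;
        double r_new = res.dot(res);
        p = res + (r_new / r_old) * p;    // next conjugate direction
        r_old = r_new;
    }
}

For truncated Newton use, x is typically warm-started at zero and tol can stay loose, since the outer iteration only needs an approximate Newton direction.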
@@ -129,7 +132,6 @@ int cal_alpha_beta(const VectorXd &dw,const VectorXd &corr,const vector<int> &A1
// line search using Newton's method
int line_search(const VectorXd &w,const MatrixXd &D,const VectorXd &corr,const vector<int> &A1,const vector<int> &A2,const VectorXd &step,double &t)
{
- double wd=w.dot(step),dd=step.dot(step);
VectorXd Dd = D*step;
VectorXd Xd = VectorXd::Zero(A1.size());
VectorXd alpha,beta,yt;
@@ -146,9 +148,9 @@ int line_search(const VectorXd &w,const MatrixXd &D,const VectorXd &corr,const v
grad=w+t*step;
Dd = D*(w + t*step);
cal_alpha_beta(Dd,corr,A1,A2,rank,yt,alpha,beta);
- grad = grad + (D.transpose()*(alpha.cwiseProduct(yt)-beta));
+ grad = grad + C*(D.transpose()*(alpha.cwiseProduct(yt)-beta));
g = grad.dot(step);
- cal_Hs(D,rank,corr,alpha,A1,A2,C,step,Hs);
+ cal_Hs(D,rank,corr,alpha,A1,A2,step,Hs);
h = Hs.dot(step);
g=g+line_turb;
h = h+line_turb;
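
The loop above is a one-dimensional Newton iteration on phi(t) = f(w + t * step): g accumulates phi'(t) = grad f(w + t*step)^T step, h is the curvature phi''(t) = step^T H step from cal_Hs, and line_turb nudges both away from zero before the division. The update is the usual

    t_{k+1} = t_k - \frac{\varphi'(t_k)}{\varphi''(t_k)}

(phi is notation introduced here, not in the source), presumably iterated until the change in t falls below line_prec.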
@@ -186,11 +188,11 @@ int train_orig(int fsize, MatrixXd &D,const vector<int> &A1,const vector<int> &A
cal_alpha_beta(dw,corr,A1,A2,rank,yt,alpha,beta);
// Generate support vector matrix sv & gradient
- obj = (weight.dot(weight) + (alpha.dot(yt.cwiseProduct(yt))-beta.dot(yt)))/2;//
- grad = weight + (D.transpose()*(alpha.cwiseProduct(yt)-beta));
+ obj = (weight.dot(weight) + C*(alpha.dot(yt.cwiseProduct(yt))-beta.dot(yt)))/2;//
+ grad = weight + C*(D.transpose()*(alpha.cwiseProduct(yt)-beta));
step = grad*0;
// Solve
- cg_solve(D,rank,corr,alpha,A1,A2,grad,C,step);
+ cg_solve(D,rank,corr,alpha,A1,A2,grad,step);
// do line search
line_search(weight,D,corr,A1,A2,step,t);
weight=weight+step*t;
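
Taken together, train_orig is a truncated Newton solver: each outer pass rebuilds the active pairs, forms the objective and its gradient, runs CG for an approximate Newton direction, and damps it with the line search. A condensed sketch of the outer loop implied by this hunk; the loop scaffolding and the convergence test are assumptions based on the maxiter and prec constants, not copied from the source:

// Condensed sketch of the outer truncated-Newton loop (assumed scaffolding).
for (int iter = 0; iter < maxiter; ++iter) {
    dw = D * weight;                                     // current scores
    cal_alpha_beta(dw, corr, A1, A2, rank, yt, alpha, beta);
    grad = weight + C * (D.transpose() * (alpha.cwiseProduct(yt) - beta));
    if (grad.norm() < prec) break;                       // assumed stop rule
    step = VectorXd::Zero(grad.size());
    cg_solve(D, rank, corr, alpha, A1, A2, grad, step);  // approx. H*step = grad
    line_search(weight, D, corr, A1, A2, step, t);       // pick damping t
    weight = weight + step * t;
}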