From 2aed1b11102196f3d839b2801a92a87243355725 Mon Sep 17 00:00:00 2001
From: Joe Zhao
Date: Wed, 8 Apr 2015 17:33:02 +0800
Subject: conjugate gradient method tested

---
 model/ranksvmtn.cpp | 42 +++++++++++++++++++++++++++++-------------
 model/ranksvmtn.h   |  2 ++
 2 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/model/ranksvmtn.cpp b/model/ranksvmtn.cpp
index 539ab5e..fe29468 100644
--- a/model/ranksvmtn.cpp
+++ b/model/ranksvmtn.cpp
@@ -1,4 +1,6 @@
 #include "ranksvmtn.h"
+#include <iostream>
+#include"../tools/matrixIO.h"
 
 using namespace std;
 using namespace Eigen;
@@ -6,31 +8,44 @@ using namespace Eigen;
 const int maxiter = 10;
 const double prec=1e-3;
 
-int cg_solve(const MatrixXd &A, const VectorXd &b, const VectorXd &x)
+int cg_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x)
 {
     double alpha,beta,r_1,r_2;
-    VectorXd p = x;
+    int step=0;
     VectorXd q;
-    VectorXd res;
+    VectorXd res = b - A*x;
+    VectorXd p = res;
     while (1)
     {
-        beta = r_1/r_2;
-        p = res + beta*p;
+        // Non preconditioned version
+        r_1 = res.dot(res);
+        cout<0)
@@ -50,6 +65,7 @@ int RSVMTN::train(DataSet &D, Labels &label){
     int iter = 0;
 
     MatrixXd A;
+    // TODO Undefined
     int n=D.rows();
     LOG(INFO) << "training with feature size:" << fsize << " Data size:" << n;
 
@@ -71,7 +87,7 @@ int RSVMTN::train(DataSet &D, Labels &label){
         }
 
         // Generate support vector matrix sv & gradient
-        objfunc_linear(D,1,pred,grad,obj,sv);
+        objfunc_linear(D,A,1,pred,grad,obj,sv);
         model.weight=model.weight+step*t;
         // When dec is small enough
         if (-step.dot(grad) < prec * obj)
diff --git a/model/ranksvmtn.h b/model/ranksvmtn.h
index cdb9796..703fee4 100644
--- a/model/ranksvmtn.h
+++ b/model/ranksvmtn.h
@@ -16,4 +16,6 @@ public:
     virtual int predict(DataSet &D, Labels &res);
 };
 
+int cg_solve(const Eigen::MatrixXd &A, const Eigen::VectorXd &b, Eigen::VectorXd &x);
+
 #endif
\ No newline at end of file
--
cgit v1.2.3-70-g09d2
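
For reference, a minimal sketch of a non-preconditioned conjugate gradient solve with the same cg_solve signature as the patch above, written against Eigen. The termination threshold of 1e-10 and the per-step residual printout mirror the added lines visible in the diff; the rest of the loop body and the small main() driver are generic textbook CG, included here as an illustrative assumption rather than the exact code from this commit.

#include <iostream>
#include <Eigen/Dense>

using namespace std;
using namespace Eigen;

// Non-preconditioned conjugate gradient solve of A*x = b,
// assuming A is symmetric positive definite. x holds the initial
// guess on entry and the solution on return.
int cg_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x)
{
    double alpha, beta, r_1, r_2 = 0;
    int step = 0;
    VectorXd q;
    VectorXd res = b - A*x;     // initial residual
    VectorXd p = res;           // initial search direction
    while (1)
    {
        r_1 = res.dot(res);
        cout << step << ":" << r_1 << endl;   // squared residual norm per step
        if (r_1 < 1e-10)                      // terminate condition
            break;
        if (step)                             // after the first pass, update the direction
        {
            beta = r_1/r_2;
            p = res + beta*p;
        }
        q = A*p;
        alpha = r_1/p.dot(q);                 // exact line search step length
        x = x + alpha*p;                      // update iterate
        res = res - alpha*q;                  // update residual
        r_2 = r_1;
        ++step;
    }
    return 0;
}

int main()
{
    // Small SPD test system to exercise the solver.
    MatrixXd A(2,2);
    A << 4, 1,
         1, 3;
    VectorXd b(2);
    b << 1, 2;
    VectorXd x = VectorXd::Zero(2);
    cg_solve(A, b, x);
    cout << "x = " << x.transpose() << endl;
    return 0;
}

Compiled against Eigen and run, this prints the squared residual at each step and then the solution, which for this 2x2 system is roughly (0.0909, 0.6364); CG reaches it in two iterations since the system has two distinct eigenvalues.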