author     Joe Zhao <ztuowen@gmail.com>              2015-04-08 17:33:02 +0800
committer  Joe Zhao <ztuowen@gmail.com>              2015-04-08 17:33:02 +0800
commit     2aed1b11102196f3d839b2801a92a87243355725 (patch)
tree       ab218eec7c626db10ef892751ab5dcc35fa4a490
parent     0c92357c8ab3a616e51b7cf83d3c038027b8fa99 (diff)
download   ranksvm-2aed1b11102196f3d839b2801a92a87243355725.tar.gz
           ranksvm-2aed1b11102196f3d839b2801a92a87243355725.tar.bz2
           ranksvm-2aed1b11102196f3d839b2801a92a87243355725.zip
conjugate gradient method tested
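
The rewritten cg_solve in model/ranksvmtn.cpp is a standard non-preconditioned conjugate gradient iteration for Ax = b. Reading the loop against the textbook formulation, with initial residual r_0 = b - A x_0 and first search direction p_0 = r_0, each step computes

    \alpha_k = \frac{r_k^\top r_k}{p_k^\top A p_k}, \qquad
    x_{k+1} = x_k + \alpha_k p_k, \qquad
    r_{k+1} = r_k - \alpha_k A p_k,

    \beta_k = \frac{r_{k+1}^\top r_{k+1}}{r_k^\top r_k}, \qquad
    p_{k+1} = r_{k+1} + \beta_k p_k,

terminating once r_k^\top r_k drops below the hard-coded 1e-5 threshold. In the code, r_1 holds the current r^\top r and r_2 the previous one, which is why beta is only formed from the second step (step != 0) onward.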
-rw-r--r--  main.cpp            | 11
-rw-r--r--  model/ranksvmtn.cpp | 42
-rw-r--r--  model/ranksvmtn.h   |  2
3 files changed, 41 insertions, 14 deletions
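
The commit also threads the matrix A (still flagged // TODO Undefined in train) through objfunc_linear. The objective line it temporarily comments out is the L2-regularized squared hinge loss

    \mathrm{obj} = \frac{C}{2} \sum_i \max(0, \mathrm{pred}_i)^2 + \frac{1}{2} w^\top w,

which matches the cwiseMax clamp of pred at zero that stays active in the code.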
diff --git a/main.cpp b/main.cpp
--- a/main.cpp
+++ b/main.cpp
@@ -9,7 +9,7 @@ INITIALIZE_EASYLOGGINGPP
 
-using Eigen::MatrixXd;
+using namespace Eigen;
 
 namespace po = boost::program_options;
 po::variables_map vm;
 
@@ -76,6 +76,15 @@ int main(int argc, char **argv) {
     po::store(po::parse_command_line(argc, argv, desc), vm);
     po::notify(vm);
 
+    /* Conjugate Gradient method test
+    MatrixXd A(3,3);
+    VectorXd b(3),x(3);
+    A<< 1,2,3,2,2,4,3,4,1;
+    b<< 1,1,1;
+    x<< 0,0,0;
+    cg_solve(A,b,x);
+    write_stream(std::cout,x);*/
+
     // Print help if necessary
     if (vm.count("help") || !(vm.count("train") || vm.count("validate") || vm.count("predict"))) {
         std::cout << desc;
diff --git a/model/ranksvmtn.cpp b/model/ranksvmtn.cpp
index 539ab5e..fe29468 100644
--- a/model/ranksvmtn.cpp
+++ b/model/ranksvmtn.cpp
@@ -1,4 +1,6 @@
 #include "ranksvmtn.h"
+#include<iostream>
+#include"../tools/matrixIO.h"
 
 using namespace std;
 using namespace Eigen;
@@ -6,31 +8,44 @@ using namespace Eigen;
 const int maxiter = 10;
 const double prec=1e-3;
 
-int cg_solve(const MatrixXd &A, const VectorXd &b, const VectorXd &x)
+int cg_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x)
 {
     double alpha,beta,r_1,r_2;
-    VectorXd p = x;
+    int step=0;
     VectorXd q;
-    VectorXd res;
+    VectorXd res = b - A*x;
+    VectorXd p = res;
     while (1)
     {
-        beta = r_1/r_2;
-        p = res + beta*p;
+        // Non preconditioned version
+        r_1 = res.dot(res);
+        cout<<step<<":"<<r_1<<endl;
+        write_stream(cout,res);
+        if (r_1<1e-5) // Terminate condition
+            break;
+        if (step){
+            beta = r_1 / r_2;
+            p = res + p * beta;
+        }
+
         q = A*p;
         alpha = r_1/p.dot(q);
-        // Non preconditioned version
-        alpha = p.dot(p)/(p.dot(q));
-        res=res-alpha*q;
-        break;
+        x=x+p*alpha;
+        res=res-q*alpha;
+        write_stream(cout,p);
+        write_stream(cout,q);
+        cin.get();
+        ++step;
+        r_2=r_1;
     }
     return 0;
 }
 
 // Calculate objfunc gradient & support vectors
-int objfunc_linear(const VectorXd &w,const double C,const VectorXd &pred,const VectorXd &grad, double &obj,MatrixXd &sv)
+int objfunc_linear(const VectorXd &w,const MatrixXd &A,const double C,VectorXd &pred,VectorXd &grad, double &obj,MatrixXd &sv)
 {
-    pred = pred.cwiseMax(Matrix::Zero(pred.rows(),pred.cols()));
-    obj = (pred.cwiseProduct(pred)*(C/2)) + w.transpose()*w/2;
+    pred = pred.cwiseMax(MatrixXd::Zero(pred.rows(),pred.cols()));
+//    obj = (pred.cwiseProduct(pred)*(C/2)) + w.transpose()*w/2;
     grad = w - (((pred*C).transpose()*A)*w).transpose();
     for (int i=0;i<pred.cols();++i)
         if (pred(i)>0)
@@ -50,6 +65,7 @@ int RSVMTN::train(DataSet &D, Labels &label){
     int iter = 0;
 
     MatrixXd A;
+    // TODO Undefined
     int n=D.rows();
     LOG(INFO) << "training with feature size:" << fsize << " Data size:" << n;
 
@@ -71,7 +87,7 @@ int RSVMTN::train(DataSet &D, Labels &label){
         }
 
         // Generate support vector matrix sv & gradient
-        objfunc_linear(D,1,pred,grad,obj,sv);
+        objfunc_linear(D,A,1,pred,grad,obj,sv);
         model.weight=model.weight+step*t;
         // When dec is small enough
         if (-step.dot(grad) < prec * obj)
diff --git a/model/ranksvmtn.h b/model/ranksvmtn.h
index cdb9796..703fee4 100644
--- a/model/ranksvmtn.h
+++ b/model/ranksvmtn.h
@@ -16,4 +16,6 @@ public:
     virtual int predict(DataSet &D, Labels &res);
 };
 
+int cg_solve(const Eigen::MatrixXd &A, const Eigen::VectorXd &b, Eigen::VectorXd &x);
+
 #endif
\ No newline at end of file
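
As a usage note, the commented-out block left in main.cpp exercises cg_solve on a small 3x3 system. A minimal standalone driver along the same lines is sketched below; it assumes the repository's headers and Eigen are on the include path, and that the cin.get() debug pause inside this version of cg_solve is either tolerated (press Enter once per iteration) or removed. The test matrix is symmetric but not positive definite, so plain CG is not guaranteed to converge on it; the cross-check against Eigen's direct solver makes that easy to see.

#include <iostream>
#include <Eigen/Dense>
#include "model/ranksvmtn.h"   // declares cg_solve(A, b, x)

int main()
{
    // The 3x3 system from the commented-out test in main.cpp
    Eigen::MatrixXd A(3,3);
    Eigen::VectorXd b(3), x(3);
    A << 1,2,3,
         2,2,4,
         3,4,1;
    b << 1,1,1;
    x << 0,0,0;          // starting guess

    cg_solve(A, b, x);   // x is refined in place through the non-const reference

    std::cout << "cg solution:     " << x.transpose() << std::endl;
    // Cross-check against a direct dense solve
    std::cout << "direct solution: " << A.fullPivLu().solve(b).transpose() << std::endl;
    return 0;
}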