Diffstat (limited to 'model/ranksvmtn.cpp')
-rw-r--r--  model/ranksvmtn.cpp  26
1 file changed, 15 insertions, 11 deletions
diff --git a/model/ranksvmtn.cpp b/model/ranksvmtn.cpp
index 1414c81..f904fdd 100644
--- a/model/ranksvmtn.cpp
+++ b/model/ranksvmtn.cpp
@@ -7,7 +7,7 @@ using namespace std;
 using namespace Eigen;
 // Main terminating criteria
-const int maxiter = 40; // max iteration count
+const int maxiter = 50; // max iteration count
 const double prec=1e-10; // precision
 // conjugate gradient
 const double cg_prec=1e-10; // precision
@@ -17,14 +17,21 @@ const int ls_maxiter = 10;
 const double line_prec=1e-10; // precision
 const double line_turb=1e-15; // perturbation
 
+void cal_Dw(RidList &D,const VectorXd &w, VectorXd &Dw)
+{
+    int n = D.getSize();
+    #pragma omp parallel for
+    for (int i=0;i<n;++i)
+        Dw(i) = D.getVec(i).dot(w);
+}
+
 int cal_Hs(RidList &D,const vector<int> &rank,const VectorXd &corr,const VectorXd &alpha,const VectorXd &s,VectorXd &Hs)
 {
     int n = D.getSize();
     int q = D.getqSize();
     Hs = VectorXd::Zero(s.rows());
     VectorXd Ds(n);
-    for (int i=0;i<n;++i)
-        Ds(i) = D.getVec(i).dot(s);
+    cal_Dw(D,s,Ds);
     VectorXd gamma(n);
     for (int i=0;i<n;)
     {
@@ -153,8 +160,7 @@ int line_search(const VectorXd &w,RidList &D,const VectorXd &corr,const VectorXd
 {
     int n=D.getSize();
     VectorXd Dd(n);
-    for (int i=0;i<n;++i)
-        Dd(i) = D.getVec(i).dot(step);
+    cal_Dw(D,step,Dd);
     VectorXd alpha,beta,yt;
     VectorXd grad;
     VectorXd Hs;
@@ -167,8 +173,7 @@ int line_search(const VectorXd &w,RidList &D,const VectorXd &corr,const VectorXd
     while (1)
     {
         grad=w+t*step;
-        for (int i=0;i<n;++i)
-            Dd(i) = D.getVec(i).dot(grad);
+        cal_Dw(D,grad,Dd);
         cal_alpha_beta(Dd,corr,D,rank,yt,alpha,beta);
         VectorXd tmp = alpha.cwiseProduct(yt)-beta;
         VectorXd res = 0*grad;
@@ -209,8 +214,7 @@ int train_orig(int fsize, RidList &Data,const VectorXd &corr,VectorXd &weight){
     VectorXd alpha,beta;
     while (true)
     {
-        for (int i=0;i<n;++i)
-            dw(i) = Data.getVec(i).dot(weight);
+        cal_Dw(Data,weight,dw);
         cal_alpha_beta(dw,corr,Data,rank,yt,alpha,beta);
         // Generate support vector matrix sv & gradient
         obj = (weight.dot(weight) + C*(alpha.dot(yt.cwiseProduct(yt))-beta.dot(yt)))/2;
@@ -251,9 +255,9 @@ int RSVMTN::train(RidList &D){
     return 0;
 };
 
-int RSVMTN::predict(DataList &D, vector<double> &res){
+int RSVMTN::predict(RidList &D, vector<double> &res){
     res.clear();
     for (int i=0;i<D.getSize();++i)
-        res.push_back(((D.getData()[i])->feature).dot(model.weight));
+        res.push_back(D.getVec(i).dot(model.weight));
     return 0;
 };
\ No newline at end of file
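
The refactor above replaces three copies of the row-by-row D*w loop with a single cal_Dw helper and parallelizes it with OpenMP. Below is a minimal, self-contained sketch of that pattern; RidListStub is a hypothetical stand-in that exposes only the getSize()/getVec() interface visible in the diff (the real RidList's internals are not shown here):

// cal_dw_sketch.cpp -- build with: g++ -O2 -fopenmp cal_dw_sketch.cpp
#include <Eigen/Dense>
#include <vector>
using Eigen::VectorXd;

// Hypothetical stand-in for the repo's RidList, exposing only the two
// members that cal_Dw relies on.
struct RidListStub {
    std::vector<VectorXd> rows;
    int getSize() { return (int)rows.size(); }
    VectorXd getVec(int i) { return rows[i]; }
};

// Same pattern as the new cal_Dw: iteration i writes only Dw(i), so the
// iterations are independent and safe to split across threads.
void cal_Dw(RidListStub &D, const VectorXd &w, VectorXd &Dw)
{
    int n = D.getSize();
    #pragma omp parallel for
    for (int i = 0; i < n; ++i)
        Dw(i) = D.getVec(i).dot(w);
}

int main()
{
    RidListStub D;
    for (int i = 0; i < 1000; ++i)
        D.rows.push_back(VectorXd::Random(16));
    VectorXd w = VectorXd::Random(16);
    VectorXd Dw(D.getSize());
    cal_Dw(D, w, Dw); // Dw(i) == rows[i].dot(w) for every i
    return 0;
}

One caveat on the design: the pragma is only safe if getVec(i) has no shared mutable state; if the real RidList builds its vectors lazily behind a cache, the parallel loop would need synchronization.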