From cccccbf6cca94a3eaf813b4468453160e91c332b Mon Sep 17 00:00:00 2001
From: Joe Zhao
Date: Mon, 14 Apr 2014 08:14:45 +0800
Subject: First commit

---
 src/TNetLib/.svn/text-base/Activation.cc.svn-base  | 138 +++++++
 src/TNetLib/.svn/text-base/Activation.h.svn-base   | 104 +++++
 src/TNetLib/.svn/text-base/Barrier.cc.svn-base     | 143 +++++++
 src/TNetLib/.svn/text-base/Barrier.h.svn-base      |  41 ++
 .../.svn/text-base/BiasedLinearity.cc.svn-base     | 180 +++++++++
 .../.svn/text-base/BiasedLinearity.h.svn-base      |  92 +++++
 src/TNetLib/.svn/text-base/BlockArray.cc.svn-base  | 136 +++++++
 src/TNetLib/.svn/text-base/BlockArray.h.svn-base   |  85 ++++
 src/TNetLib/.svn/text-base/CRBEDctFeat.h.svn-base  | 432 +++++++++++++++++++++
 src/TNetLib/.svn/text-base/Cache.cc.svn-base       | 248 ++++++++++++
 src/TNetLib/.svn/text-base/Cache.h.svn-base        |  74 ++++
 src/TNetLib/.svn/text-base/Component.h.svn-base    | 387 ++++++++++++++++++
 src/TNetLib/.svn/text-base/Makefile.svn-base       |  29 ++
 src/TNetLib/.svn/text-base/Mutex.cc.svn-base       |  48 +++
 src/TNetLib/.svn/text-base/Mutex.h.svn-base        |  34 ++
 src/TNetLib/.svn/text-base/Nnet.cc.svn-base        | 360 +++++++++++++++++
 src/TNetLib/.svn/text-base/Nnet.h.svn-base         | 194 +++++++++
 src/TNetLib/.svn/text-base/ObjFun.cc.svn-base      | 231 +++++++++++
 src/TNetLib/.svn/text-base/ObjFun.h.svn-base       | 160 ++++++++
 src/TNetLib/.svn/text-base/Platform.h.svn-base     | 397 +++++++++++++++++++
 src/TNetLib/.svn/text-base/Semaphore.cc.svn-base   |  64 +++
 src/TNetLib/.svn/text-base/Semaphore.h.svn-base    |  26 ++
 .../.svn/text-base/SharedLinearity.cc.svn-base     | 277 +++++++++++++
 .../.svn/text-base/SharedLinearity.h.svn-base      | 103 +++++
 src/TNetLib/.svn/text-base/Thread.h.svn-base       |  53 +++
 25 files changed, 4036 insertions(+)
 create mode 100644 src/TNetLib/.svn/text-base/Activation.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Activation.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Barrier.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Barrier.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/BiasedLinearity.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/BiasedLinearity.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/BlockArray.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/BlockArray.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/CRBEDctFeat.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Cache.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Cache.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Component.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Makefile.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Mutex.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Mutex.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Nnet.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Nnet.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/ObjFun.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/ObjFun.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Platform.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Semaphore.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Semaphore.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/SharedLinearity.cc.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/SharedLinearity.h.svn-base
 create mode 100644 src/TNetLib/.svn/text-base/Thread.h.svn-base

diff --git a/src/TNetLib/.svn/text-base/Activation.cc.svn-base b/src/TNetLib/.svn/text-base/Activation.cc.svn-base
new file
mode 100644 index 0000000..8e84190 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Activation.cc.svn-base @@ -0,0 +1,138 @@ + +#include "Activation.h" + + +namespace TNet { + +void Sigmoid::PropagateFnc(const BfMatrix& X, BfMatrix& Y) { + //Y = 1/(1+e^{-X}) + for(size_t r=0; r& X, Matrix& Y) { + const Matrix& out = GetOutput(); + //Y = OUT*(1-OUT)*X //ODVOZENO + for(size_t r=0; r> mDim; + mDimOffset.Init(mDim.Dim()+1); + + int off=0; + for(int i=0; i -0.1 && sum < 0.1) { + BfSubVector y_i_smx_j(Y[i].Range(mDimOffset[j],mDim[j])); + y_i_smx_j.Copy(x_i_smx_j); + } else if (sum > 0.9 && sum < 1.1) { + ; //do nothing + } else { + KALDI_ERR << "Invalid sum: " << sum; + } + } + } + +// X.CheckData("BlockSoftmax BackpropagateFnc X"); +// Y.CheckData("BlockSoftmax BackpropagateFnc Y"); + +} + + + +} //namespace TNet + diff --git a/src/TNetLib/.svn/text-base/Activation.h.svn-base b/src/TNetLib/.svn/text-base/Activation.h.svn-base new file mode 100644 index 0000000..90263d0 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Activation.h.svn-base @@ -0,0 +1,104 @@ + +#ifndef _ACT_FUN_I_ +#define _ACT_FUN_I_ + + +#include "Component.h" + + +namespace TNet +{ + + /** + * Sigmoid activation function + */ + class Sigmoid : public Component + { + public: + Sigmoid(size_t nInputs, size_t nOutputs, Component *pPred) + : Component(nInputs,nOutputs,pPred) + { } + + ComponentType GetType() const + { return SIGMOID; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { return new Sigmoid(GetNInputs(),GetNOutputs(),NULL); } + + protected: + void PropagateFnc(const BfMatrix& X, BfMatrix& Y); + void BackpropagateFnc(const BfMatrix& X, BfMatrix& Y); + }; + + + /** + * Softmax activation function + */ + class Softmax : public Component + { + public: + Softmax(size_t nInputs, size_t nOutputs, Component *pPred) + : Component(nInputs,nOutputs,pPred) + { } + + ComponentType GetType() const + { return SOFTMAX; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { return new Softmax(GetNInputs(),GetNOutputs(),NULL); } + + protected: + void PropagateFnc(const BfMatrix& X, BfMatrix& Y); + void BackpropagateFnc(const BfMatrix& X, BfMatrix& Y); + }; + + + /** + * BlockSoftmax activation function. + * It is several softmaxes in one. + * The dimensions of softmaxes are given by integer vector. + * During backpropagation: + * If the derivatives sum up to 0, they are backpropagated. + * If the derivatives sup up to 1, they are discarded + * (like this we know that the softmax was 'inactive'). + */ + class BlockSoftmax : public Component + { + public: + BlockSoftmax(size_t nInputs, size_t nOutputs, Component *pPred) + : Component(nInputs,nOutputs,pPred) + { } + + ComponentType GetType() const + { return BLOCK_SOFTMAX; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { return new BlockSoftmax(*this); } + + void ReadFromStream(std::istream& rIn); + void WriteToStream(std::ostream& rOut); + + protected: + void PropagateFnc(const BfMatrix& X, BfMatrix& Y); + void BackpropagateFnc(const BfMatrix& X, BfMatrix& Y); + + private: + Vector mDim; + Vector mDimOffset; + }; + + + +} //namespace + + +#endif diff --git a/src/TNetLib/.svn/text-base/Barrier.cc.svn-base b/src/TNetLib/.svn/text-base/Barrier.cc.svn-base new file mode 100644 index 0000000..0170e04 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Barrier.cc.svn-base @@ -0,0 +1,143 @@ +/* + * barrier.c + * + * This file implements the "barrier" synchronization construct. 
+ * + * A barrier causes threads to wait until a set of threads has + * all "reached" the barrier. The number of threads required is + * set when the barrier is initialized, and cannot be changed + * except by reinitializing. + * + * The barrier_init() and barrier_destroy() functions, + * respectively, allow you to initialize and destroy the + * barrier. + * + * The barrier_wait() function allows a thread to wait for a + * barrier to be completed. One thread (the one that happens to + * arrive last) will return from barrier_wait() with the status + * -1 on success -- others will return with 0. The special + * status makes it easy for the calling code to cause one thread + * to do something in a serial region before entering another + * parallel section of code. + */ +#include +#include "Error.h" +#include "Barrier.h" + +namespace TNet { + +/* + * Initialize a barrier for use. + */ +Barrier::Barrier(int count) + : threshold_(count), counter_(count), cycle_(0) { + + if(0 != pthread_mutex_init(&mutex_, NULL)) + KALDI_ERR << "Cannot initialize mutex"; + + if(0 != pthread_cond_init(&cv_, NULL)) { + pthread_mutex_destroy(&mutex_); + KALDI_ERR << "Cannot initilize condv"; + } +} + +/* + * Destroy a barrier when done using it. + */ +Barrier::~Barrier() { + + if(0 != pthread_mutex_lock(&mutex_)) + KALDI_ERR << "Cannot lock mutex"; + + /* + * Check whether any threads are known to be waiting; report + * "BUSY" if so. + */ + if(counter_ != threshold_) { + pthread_mutex_unlock (&mutex_); + KALDI_ERR << "Cannot destroy barrier with waiting thread"; + } + + if(0 != pthread_mutex_unlock(&mutex_)) + KALDI_ERR << "Cannot unlock barrier"; + + /* + * If unable to destroy either 1003.1c synchronization + * object, halt + */ + if(0 != pthread_mutex_destroy(&mutex_)) + KALDI_ERR << "Cannot destroy mutex"; + + if(0 != pthread_cond_destroy(&cv_)) + KALDI_ERR << "Cannot destroy condv"; +} + + +void Barrier::SetThreshold(int thr) { + if(counter_ != threshold_) + KALDI_ERR << "Cannot set threshold, while a thread is waiting"; + + threshold_ = thr; counter_ = thr; +} + + + +/* + * Wait for all members of a barrier to reach the barrier. When + * the count (of remaining members) reaches 0, broadcast to wake + * all threads waiting. + */ +int Barrier::Wait() { + int status, cancel, tmp, cycle; + + if(threshold_ == 0) + KALDI_ERR << "Cannot wait when Threshold value was not set"; + + if(0 != pthread_mutex_lock(&mutex_)) + KALDI_ERR << "Cannot lock mutex"; + + cycle = cycle_; /* Remember which cycle we're on */ + + if(--counter_ == 0) { + cycle_ = !cycle_; + counter_ = threshold_; + status = pthread_cond_broadcast(&cv_); + /* + * The last thread into the barrier will return status + * -1 rather than 0, so that it can be used to perform + * some special serial code following the barrier. + */ + if(status == 0) status = -1; + } else { + /* + * Wait with cancellation disabled, because barrier_wait + * should not be a cancellation point. + */ + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel); + + /* + * Wait until the barrier's cycle changes, which means + * that it has been broadcast, and we don't want to wait + * anymore. + */ + while (cycle == cycle_) { + status = pthread_cond_wait(&cv_, &mutex_); + if (status != 0) break; + } + + pthread_setcancelstate(cancel, &tmp); + } + /* + * Ignore an error in unlocking. It shouldn't happen, and + * reporting it here would be misleading -- the barrier wait + * completed, after all, whereas returning, for example, + * EINVAL would imply the wait had failed. 
The next attempt + * to use the barrier *will* return an error, or hang, due + * to whatever happened to the mutex. + */ + pthread_mutex_unlock (&mutex_); + return status; /* error, -1 for waker, or 0 */ +} + + +}//namespace TNet diff --git a/src/TNetLib/.svn/text-base/Barrier.h.svn-base b/src/TNetLib/.svn/text-base/Barrier.h.svn-base new file mode 100644 index 0000000..a5849d2 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Barrier.h.svn-base @@ -0,0 +1,41 @@ +/* + * barrier.h + * + * This header file describes the "barrier" synchronization + * construct. The type barrier_t describes the full state of the + * barrier including the POSIX 1003.1c synchronization objects + * necessary. + * + * A barrier causes threads to wait until a set of threads has + * all "reached" the barrier. The number of threads required is + * set when the barrier is initialized, and cannot be changed + * except by reinitializing. + */ +#include + +#ifndef barrier_h +#define barrier_h + +namespace TNet { + +/* + * Structure describing a barrier. + */ +class Barrier { + public: + Barrier(int count=0); + ~Barrier(); + void SetThreshold(int thr); + int Wait(); + private: + pthread_mutex_t mutex_; /* Control access to barrier */ + pthread_cond_t cv_; /* wait for barrier */ + int threshold_; /* number of threads required */ + int counter_; /* current number of threads */ + int cycle_; /* alternate wait cycles (0 or 1) */ +}; + +}//namespace TNet + +#endif + diff --git a/src/TNetLib/.svn/text-base/BiasedLinearity.cc.svn-base b/src/TNetLib/.svn/text-base/BiasedLinearity.cc.svn-base new file mode 100644 index 0000000..b52aeb0 --- /dev/null +++ b/src/TNetLib/.svn/text-base/BiasedLinearity.cc.svn-base @@ -0,0 +1,180 @@ + + +#include "BiasedLinearity.h" + + +namespace TNet { + + +void +BiasedLinearity:: +PropagateFnc(const Matrix& X, Matrix& Y) +{ + //y = b + x.A + + //precopy bias + size_t rows = X.Rows(); + for(size_t i=0; i& X, Matrix& Y) +{ + // e' = e.A^T + Y.Zero(); + Y.BlasGemm(1.0f, X, NO_TRANS, *mpLinearity, TRANS, 0.0f); +} + + + +void +BiasedLinearity:: +ReadFromStream(std::istream& rIn) +{ + //matrix is stored transposed as SNet does + Matrix transpose; + rIn >> transpose; + mLinearity = Matrix(transpose, TRANS); + //biases stored normally + rIn >> mBias; +} + + +void +BiasedLinearity:: +WriteToStream(std::ostream& rOut) +{ + //matrix is stored transposed as SNet does + Matrix transpose(mLinearity, TRANS); + rOut << transpose; + //biases stored normally + rOut << mBias; + rOut << std::endl; +} + + +void +BiasedLinearity:: +Gradient() +{ + //calculate gradient of weight matrix + mLinearityCorrection.Zero(); + mLinearityCorrection.BlasGemm(1.0f, GetInput(), TRANS, + GetErrorInput(), NO_TRANS, + 0.0f); + + //calculate gradient of bias + mBiasCorrection.Set(0.0f); + size_t rows = GetInput().Rows(); + for(size_t i=0; i(src); + + //allocate accumulators when needed + if(mLinearityCorrectionAccu.MSize() == 0) { + mLinearityCorrectionAccu.Init(mLinearity.Rows(),mLinearity.Cols()); + } + if(mBiasCorrectionAccu.MSize() == 0) { + mBiasCorrectionAccu.Init(mBias.Dim()); + } + + //need to find out which rows to sum... + int div = mLinearityCorrection.Rows() / thrN; + int mod = mLinearityCorrection.Rows() % thrN; + + int origin = thr * div + ((mod > thr)? thr : mod); + int rows = div + ((mod > thr)? 
1 : 0); + + //create the matrix windows + const SubMatrix src_mat ( + src_comp.mLinearityCorrection, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + SubMatrix tgt_mat ( + mLinearityCorrectionAccu, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + //sum the rows + Add(tgt_mat,src_mat); + + //first thread will always sum the bias correction + if(thr == 0) { + Add(mBiasCorrectionAccu,src_comp.mBiasCorrection); + } + +} + + +void +BiasedLinearity:: +Update(int thr, int thrN) +{ + //need to find out which rows to sum... + int div = mLinearity.Rows() / thrN; + int mod = mLinearity.Rows() % thrN; + + int origin = thr * div + ((mod > thr)? thr : mod); + int rows = div + ((mod > thr)? 1 : 0); + + //std::cout << "[P" << thr << "," << origin << "," << rows << "]" << std::flush; + + //get the matrix windows + SubMatrix src_mat ( + mLinearityCorrectionAccu, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + SubMatrix tgt_mat ( + mLinearity, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + + + //update weights + AddScaled(tgt_mat, src_mat, -mLearningRate); + + //perform L2 regularization (weight decay) + BaseFloat L2_decay = -mLearningRate * mWeightcost * mBunchsize; + if(L2_decay != 0.0) { + tgt_mat.AddScaled(L2_decay, tgt_mat); + } + + //first thread always update bias + if(thr == 0) { + //std::cout << "[" << thr << "BP]" << std::flush; + AddScaled(mBias, mBiasCorrectionAccu, -mLearningRate); + } + + //reset the accumulators + src_mat.Zero(); + if(thr == 0) { + mBiasCorrectionAccu.Zero(); + } + +} + +} //namespace diff --git a/src/TNetLib/.svn/text-base/BiasedLinearity.h.svn-base b/src/TNetLib/.svn/text-base/BiasedLinearity.h.svn-base new file mode 100644 index 0000000..5018637 --- /dev/null +++ b/src/TNetLib/.svn/text-base/BiasedLinearity.h.svn-base @@ -0,0 +1,92 @@ +#ifndef _BIASED_LINEARITY_H_ +#define _BIASED_LINEARITY_H_ + + +#include "Component.h" + +#include "Matrix.h" +#include "Vector.h" + + +namespace TNet { + +class BiasedLinearity : public UpdatableComponent +{ + public: + + BiasedLinearity(size_t nInputs, size_t nOutputs, Component *pPred); + ~BiasedLinearity() { } + + ComponentType GetType() const + { return BIASED_LINEARITY; } + + const char* GetName() const + { return ""; } + + Component* Clone() const; + + void PropagateFnc(const Matrix& X, Matrix& Y); + void BackpropagateFnc(const Matrix& X, Matrix& Y); + + void ReadFromStream(std::istream& rIn); + void WriteToStream(std::ostream& rOut); + + /// calculate gradient + void Gradient(); + /// accumulate gradient from other components + void AccuGradient(const UpdatableComponent& src, int thr, int thrN); + /// update weights, reset the accumulator + void Update(int thr, int thrN); + + protected: + Matrix mLinearity; ///< Matrix with neuron weights + Vector mBias; ///< Vector with biases + + const Matrix* mpLinearity; + const Vector* mpBias; + + Matrix mLinearityCorrection; ///< Matrix for linearity updates + Vector mBiasCorrection; ///< Vector for bias updates + + Matrix mLinearityCorrectionAccu; ///< Matrix for summing linearity updates + Vector mBiasCorrectionAccu; ///< Vector for summing bias updates + +}; + + + + +//////////////////////////////////////////////////////////////////////////// +// INLINE FUNCTIONS +// BiasedLinearity:: +inline +BiasedLinearity:: +BiasedLinearity(size_t nInputs, size_t nOutputs, Component *pPred) + : UpdatableComponent(nInputs, nOutputs, pPred), + mLinearity(), mBias(), //cloned instaces don't need this + mpLinearity(&mLinearity), mpBias(&mBias), + 
mLinearityCorrection(nInputs,nOutputs), mBiasCorrection(nOutputs), + mLinearityCorrectionAccu(), mBiasCorrectionAccu() //cloned instances don't need this +{ } + +inline +Component* +BiasedLinearity:: +Clone() const +{ + BiasedLinearity* ptr = new BiasedLinearity(GetNInputs(), GetNOutputs(), NULL); + ptr->mpLinearity = mpLinearity; //copy pointer from currently active weights + ptr->mpBias = mpBias; //... + + ptr->mLearningRate = mLearningRate; + + return ptr; +} + + + +} //namespace + + + +#endif diff --git a/src/TNetLib/.svn/text-base/BlockArray.cc.svn-base b/src/TNetLib/.svn/text-base/BlockArray.cc.svn-base new file mode 100644 index 0000000..18a41d2 --- /dev/null +++ b/src/TNetLib/.svn/text-base/BlockArray.cc.svn-base @@ -0,0 +1,136 @@ + + +#include "BlockArray.h" +#include "Nnet.h" + + +namespace TNet +{ + + void + BlockArray:: + PropagateFnc(const Matrix& X, Matrix& Y) + { + SubMatrix colsX(X,0,1,0,1); //dummy dimensions + SubMatrix colsY(Y,0,1,0,1); //dummy dimensions + + int X_src_ori=0, Y_tgt_ori=0; + for(int i=0; iGetNInputs(); + int colsY_cnt=mBlocks[i]->GetNOutputs(); + colsX = X.Range(0,X.Rows(),X_src_ori,colsX_cnt); + colsY = Y.Range(0,Y.Rows(),Y_tgt_ori,colsY_cnt); + + //propagate through the block(network) + mBlocks[i]->Propagate(colsX,colsY); + + //shift the origin coordinates + X_src_ori += colsX_cnt; + Y_tgt_ori += colsY_cnt; + } + + assert(X_src_ori == X.Cols()); + assert(Y_tgt_ori == Y.Cols()); + } + + + void + BlockArray:: + BackpropagateFnc(const Matrix& X, Matrix& Y) + { + KALDI_ERR << "Unimplemented"; + } + + + void + BlockArray:: + Update() + { + KALDI_ERR << "Unimplemented"; + } + + + void + BlockArray:: + ReadFromStream(std::istream& rIn) + { + if(mBlocks.size() > 0) { + KALDI_ERR << "Cannot read block vector, " + << "aleady filled bt " + << mBlocks.size() + << "elements"; + } + + rIn >> std::ws >> mNBlocks; + if(mNBlocks < 1) { + KALDI_ERR << "Bad number of blocks:" << mNBlocks; + } + + //read all the blocks + std::string tag; + int block_id; + for(int i=0; i + rIn >> std::ws >> tag; + //make it lowercase + std::transform(tag.begin(), tag.end(), tag.begin(), tolower); + //check + if(tag!="") { + KALDI_ERR << " keywotd expected"; + } + + //read block number + rIn >> std::ws >> block_id; + if(block_id != i+1) { + KALDI_ERR << "Expected block number:" << i+1 + << " read block number: " << block_id; + } + + //read the nnet + Network* p_nnet = new Network; + p_nnet->ReadNetwork(rIn); + if(p_nnet->Layers() == 0) { + KALDI_ERR << "Cannot read empty network to a block"; + } + + //add it to the vector + mBlocks.push_back(p_nnet); + } + + //check the declared dimensionality + int sum_inputs=0, sum_outputs=0; + for(int i=0; iGetNInputs(); + sum_outputs += mBlocks[i]->GetNOutputs(); + } + if(sum_inputs != GetNInputs()) { + KALDI_ERR << "Non-matching number of INPUTS! Declared:" + << GetNInputs() + << " summed from blocks" + << sum_inputs; + } + if(sum_outputs != GetNOutputs()) { + KALDI_ERR << "Non-matching number of OUTPUTS! 
Declared:" + << GetNOutputs() + << " summed from blocks" + << sum_outputs; + } + } + + + void + BlockArray:: + WriteToStream(std::ostream& rOut) + { + rOut << " " << mBlocks.size() << " "; + for(int i=0; i " << i+1 << "\n"; + mBlocks[i]->WriteNetwork(rOut); + rOut << "\n"; + } + } + + +} //namespace + diff --git a/src/TNetLib/.svn/text-base/BlockArray.h.svn-base b/src/TNetLib/.svn/text-base/BlockArray.h.svn-base new file mode 100644 index 0000000..e6a8657 --- /dev/null +++ b/src/TNetLib/.svn/text-base/BlockArray.h.svn-base @@ -0,0 +1,85 @@ +#ifndef _BLOCK_ARRAY_H_ +#define _BLOCK_ARRAY_H_ + + +#include "Component.h" + +#include "Matrix.h" +#include "Vector.h" + + +namespace TNet { + + class Network; + + class BlockArray : public Component + { + public: + + BlockArray(size_t nInputs, size_t nOutputs, Component *pPred); + ~BlockArray(); + + ComponentType GetType() const; + const char* GetName() const; + + void PropagateFnc(const Matrix& X, Matrix& Y); + void BackpropagateFnc(const Matrix& X, Matrix& Y); + + void Update(); + + void ReadFromStream(std::istream& rIn); + void WriteToStream(std::ostream& rOut); + + //:TODO: + Component* Clone() const { KALDI_ERR << "Unimplemented"; } + + protected: + std::vector mBlocks; ///< vector with networks, one network is one block + size_t mNBlocks; + }; + + + + + //////////////////////////////////////////////////////////////////////////// + // INLINE FUNCTIONS + // BlockArray:: + inline + BlockArray:: + BlockArray(size_t nInputs, size_t nOutputs, Component *pPred) + : Component(nInputs, nOutputs, pPred), + mNBlocks(0) + { } + + + inline + BlockArray:: + ~BlockArray() + { + for(int i=0; i"; + } + + + +} //namespace + + + +#endif diff --git a/src/TNetLib/.svn/text-base/CRBEDctFeat.h.svn-base b/src/TNetLib/.svn/text-base/CRBEDctFeat.h.svn-base new file mode 100644 index 0000000..0984c36 --- /dev/null +++ b/src/TNetLib/.svn/text-base/CRBEDctFeat.h.svn-base @@ -0,0 +1,432 @@ +#ifndef _CUCRBEDCTFEATURES_H_ +#define _CUCRBEDCTFEATURES_H_ + + +#include "Component.h" +#include "Matrix.h" +#include "Vector.h" +#include "cblas.h" + + +namespace TNet { + + /** + * Expands the time context of the input features + * in N, out k*N, FrameOffset o_1,o_2,...,o_k + * FrameOffset example 11frames: -5 -4 -3 -2 -1 0 1 2 3 4 5 + */ + class Expand : public Component + { + public: + Expand(size_t nInputs, size_t nOutputs, Component* pPred) + : Component(nInputs,nOutputs,pPred) + { } + + ~Expand() + { } + + ComponentType GetType() const + { return EXPAND; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { + Expand* p = new Expand(GetNInputs(),GetNOutputs(),NULL); + p->mFrameOffset.Init(mFrameOffset.Dim()); + p->mFrameOffset.Copy(mFrameOffset); + return p; + } + + void ReadFromStream(std::istream& rIn) + { rIn >> mFrameOffset; } + + void WriteToStream(std::ostream& rOut) + { rOut << mFrameOffset; } + + protected: + void PropagateFnc(const Matrix& X, Matrix& Y) + { + assert(X.Cols()*mFrameOffset.Dim() == Y.Cols()); + assert(X.Rows() == Y.Rows()); + + for(size_t r=0;r= X.Rows()) r_off = X.Rows()-1; + memcpy(Y.pRowData(r)+off*X.Cols(),X.pRowData(r_off),sizeof(BaseFloat)*X.Cols()); + } + } + } + + void BackpropagateFnc(const Matrix& X, Matrix& Y) + { Error("__func__ Nonsense"); } + + protected: + Vector mFrameOffset; + }; + + + + /** + * Rearrange the matrix columns according to the indices in mCopyFromIndices + */ + class Copy : public Component + { + public: + Copy(size_t nInputs, size_t nOutputs, Component* pPred) + : 
Component(nInputs,nOutputs,pPred) + { } + + ~Copy() + { } + + ComponentType GetType() const + { return COPY; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { + Copy* p = new Copy(GetNInputs(),GetNOutputs(),NULL); + p->mCopyFromIndices.Init(mCopyFromIndices.Dim()); + p->mCopyFromIndices.Copy(mCopyFromIndices); + return p; + } + + void ReadFromStream(std::istream& rIn) + { + Vector vec; rIn >> vec; vec.Add(-1); + mCopyFromIndices.Init(vec.Dim()).Copy(vec); + } + + void WriteToStream(std::ostream& rOut) + { + Vector vec(mCopyFromIndices); + vec.Add(1); rOut << vec; + } + + protected: + void PropagateFnc(const Matrix& X, Matrix& Y) + { + assert(mCopyFromIndices.Dim() == Y.Cols()); + for(int i=0; i= 0 && mCopyFromIndices[i] < X.Cols()); + } + + for(size_t r=0; r& X, Matrix& Y) + { Error("__func__ Nonsense"); } + + protected: + Vector mCopyFromIndices; + }; + + class Transpose : public Component + { + public: + Transpose(size_t nInputs, size_t nOutputs, Component* pPred) + : Component(nInputs,nOutputs,pPred), mContext(0) + { } + + ~Transpose() + { } + + ComponentType GetType() const + { return TRANSPOSE; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { + Transpose* p = new Transpose(GetNInputs(),GetNOutputs(),NULL); + p->mCopyFromIndices.Init(mCopyFromIndices.Dim()); + p->mCopyFromIndices.Copy(mCopyFromIndices); + p->mContext = mContext; + return p; + } + + void ReadFromStream(std::istream& rIn) + { + rIn >> std::ws >> mContext; + + if(GetNInputs() != GetNOutputs()) { + Error("Input dim must be same as output dim"); + } + + Vector vec(GetNInputs()); + int channels = GetNInputs() / mContext; + for(int i=0, ch=0; ch& X, Matrix& Y) + { + assert(mCopyFromIndices.Dim() == Y.Cols()); + for(int i=0; i= 0 && mCopyFromIndices[i] < X.Cols()); + } + + for(size_t r=0; r& X, Matrix& Y) + { Error("__func__ Nonsense"); } + + protected: + int mContext; + Vector mCopyFromIndices; + }; + + + /** + * BlockLinearity is used for the blockwise multiplication by + * DCT transform loaded from disk + */ + class BlockLinearity : public Component + { + public: + BlockLinearity(size_t nInputs, size_t nOutputs, Component* pPred) + : Component(nInputs,nOutputs,pPred) + { } + + ~BlockLinearity() + { } + + + ComponentType GetType() const + { return Component::BLOCK_LINEARITY; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { + BlockLinearity* p = new BlockLinearity(GetNInputs(),GetNOutputs(),NULL); + p->mBlockLinearity.Init(mBlockLinearity.Rows(),mBlockLinearity.Cols()); + p->mBlockLinearity.Copy(mBlockLinearity); + return p; + } + + void PropagateFnc(const Matrix& X, Matrix& Y) + { + assert(X.Rows() == Y.Rows()); + assert(X.Cols()%mBlockLinearity.Rows() == 0); + assert(Y.Cols()%mBlockLinearity.Cols() == 0); + assert(X.Cols()/mBlockLinearity.Rows() == Y.Cols()/mBlockLinearity.Cols()); + + int instN = X.Cols()/mBlockLinearity.Rows(); + for(int inst=0; inst& X, Matrix& Y) + { Error("__func__ Not implemented"); } + + + void ReadFromStream(std::istream& rIn) + { + Matrix mat; + rIn >> mat; + Matrix trans(mat,TRANS); + mBlockLinearity.Init(trans.Rows(),trans.Cols()).Copy(trans); + + if((GetNOutputs() % mBlockLinearity.Cols() != 0) || + (GetNInputs() % mBlockLinearity.Rows() != 0) || + ((GetNOutputs() / mBlockLinearity.Cols()) != + (GetNInputs() / mBlockLinearity.Rows()))) + { + Error("BlockLinearity matrix dimensions must divide IO dims"); + } + } + + void WriteToStream(std::ostream& rOut) + { + Matrix 
trans(mBlockLinearity,TRANS); + rOut << trans; + } + + private: + Matrix mBlockLinearity; + }; + + + + class Bias : public Component + { + public: + Bias(size_t nInputs, size_t nOutputs, Component* pPred) + : Component(nInputs,nOutputs,pPred) + { } + + ~Bias() + { } + + + ComponentType GetType() const + { return Component::BIAS; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { + Bias* p = new Bias(GetNInputs(),GetNOutputs(),NULL); + p->mBias.Init(mBias.Dim()); + p->mBias.Copy(mBias); + return p; + } + + void PropagateFnc(const Matrix& X, Matrix& Y) + { + Y.Copy(X); + for(size_t r=0; r& X, Matrix& Y) + { Y.Copy(X); } + + + void ReadFromStream(std::istream& rIn) + { rIn >> mBias; } + + void WriteToStream(std::ostream& rOut) + { rOut << mBias; } + + private: + Vector mBias; + }; + + + + class Window : public Component + { + public: + Window(size_t nInputs, size_t nOutputs, Component* pPred) + : Component(nInputs, nOutputs, pPred) + { } + + ~Window() + { } + + + ComponentType GetType() const + { return Component::WINDOW; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { + Window* p = new Window(GetNInputs(),GetNOutputs(),NULL); + p->mWindow.Init(mWindow.Dim()); + p->mWindow.Copy(mWindow); + return p; + } + + + void PropagateFnc(const Matrix& X, Matrix& Y) + { Y.Copy(X); + for(size_t r=0; r& X, Matrix& Y) + { Error("__func__ Not implemented"); } + + + void ReadFromStream(std::istream& rIn) + { rIn >> mWindow; } + + void WriteToStream(std::ostream& rOut) + { rOut << mWindow; } + + private: + Vector mWindow; + }; + + class Log : public Component + { + public: + Log(size_t nInputs, size_t nOutputs, Component* pPred) + : Component(nInputs, nOutputs, pPred) + { } + + ~Log() + { } + + + ComponentType GetType() const + { return Component::LOG; } + + const char* GetName() const + { return ""; } + + Component* Clone() const + { return new Log(GetNInputs(),GetNOutputs(),NULL); } + + + void PropagateFnc(const Matrix& X, Matrix& Y) + { Y.Copy(X); Y.ApplyLog(); } + + void BackpropagateFnc(const Matrix& X, Matrix& Y) + { Error("__func__ Not implemented"); } + + + void ReadFromStream(std::istream& rIn) + { } + + void WriteToStream(std::ostream& rOut) + { } + + }; + +} + + +#endif + diff --git a/src/TNetLib/.svn/text-base/Cache.cc.svn-base b/src/TNetLib/.svn/text-base/Cache.cc.svn-base new file mode 100644 index 0000000..f498318 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Cache.cc.svn-base @@ -0,0 +1,248 @@ + +#include + +#include "Cache.h" +#include "Matrix.h" +#include "Vector.h" + + +namespace TNet { + + Cache:: + Cache() + : mState(EMPTY), mIntakePos(0), mExhaustPos(0), mDiscarded(0), + mRandomized(false), mTrace(0) + { } + + Cache:: + ~Cache() + { } + + void + Cache:: + Init(size_t cachesize, size_t bunchsize, long int seed) + { + if((cachesize % bunchsize) != 0) { + KALDI_ERR << "Non divisible cachesize" << cachesize + << " by bunchsize" << bunchsize; + } + + mCachesize = cachesize; + mBunchsize = bunchsize; + + mState = EMPTY; + + mIntakePos = 0; + mExhaustPos = 0; + + mRandomized = false; + + if(seed == 0) { + //generate seed + struct timeval tv; + if (gettimeofday(&tv, 0) == -1) { + Error("gettimeofday does not work."); + exit(-1); + } + seed = (int)(tv.tv_sec) + (int)tv.tv_usec + (int)(tv.tv_usec*tv.tv_usec); + } + + srand48(seed); + + } + + void + Cache:: + AddData(const Matrix& rFeatures, const Matrix& rDesired) + { + assert(rFeatures.Rows() == rDesired.Rows()); + + //lazy buffers allocation + if(mFeatures.Rows() != mCachesize) 
{ + mFeatures.Init(mCachesize,rFeatures.Cols()); + mDesired.Init(mCachesize,rDesired.Cols()); + } + + //warn if segment longer than half-cache + if(rFeatures.Rows() > mCachesize/2) { + std::ostringstream os; + os << "Too long segment and small feature cache! " + << " cachesize: " << mCachesize + << " segmentsize: " << rFeatures.Rows(); + Warning(os.str()); + } + + //change state + if(mState == EMPTY) { + if(mTrace&3) std::cout << "/" << std::flush; + mState = INTAKE; mIntakePos = 0; + + //check for leftover from previous segment + int leftover = mFeaturesLeftover.Rows(); + //check if leftover is not bigger than cachesize + if(leftover > mCachesize) { + std::ostringstream os; + os << "Too small feature cache: " << mCachesize + << ", truncating: " + << leftover - mCachesize << " frames from previous segment leftover"; + //Error(os.str()); + Warning(os.str()); + leftover = mCachesize; + } + //prefill cache with leftover + if(leftover > 0) { + memcpy(mFeatures.pData(),mFeaturesLeftover.pData(), + (mFeaturesLeftover.MSize() < mFeatures.MSize()? + mFeaturesLeftover.MSize() : mFeatures.MSize()) + ); + memcpy(mDesired.pData(),mDesiredLeftover.pData(), + (mDesiredLeftover.MSize() < mDesired.MSize()? + mDesiredLeftover.MSize() : mDesired.MSize()) + ); + mFeaturesLeftover.Destroy(); + mDesiredLeftover.Destroy(); + mIntakePos += leftover; + } + } + + assert(mState == INTAKE); + assert(rFeatures.Rows() == rDesired.Rows()); + if(mTrace&2) std::cout << "F" << std::flush; + + int cache_space = mCachesize - mIntakePos; + int feature_length = rFeatures.Rows(); + int fill_rows = (cache_space 0); + assert(mFeatures.Stride()==rFeatures.Stride()); + assert(mDesired.Stride()==rDesired.Stride()); + + //copy the data to cache + memcpy(mFeatures.pData()+mIntakePos*mFeatures.Stride(), + rFeatures.pData(), + fill_rows*mFeatures.Stride()*sizeof(BaseFloat)); + + memcpy(mDesired.pData()+mIntakePos*mDesired.Stride(), + rDesired.pData(), + fill_rows*mDesired.Stride()*sizeof(BaseFloat)); + + //copy leftovers + if(leftover > 0) { + mFeaturesLeftover.Init(leftover,mFeatures.Cols()); + mDesiredLeftover.Init(leftover,mDesired.Cols()); + + memcpy(mFeaturesLeftover.pData(), + rFeatures.pData()+fill_rows*rFeatures.Stride(), + mFeaturesLeftover.MSize()); + + memcpy(mDesiredLeftover.pData(), + rDesired.pData()+fill_rows*rDesired.Stride(), + mDesiredLeftover.MSize()); + } + + //update cursor + mIntakePos += fill_rows; + + //change state + if(mIntakePos == mCachesize) { + if(mTrace&3) std::cout << "\\" << std::flush; + mState = FULL; + } + } + + + + void + Cache:: + Randomize() + { + assert(mState == FULL || mState == INTAKE); + + if(mTrace&3) std::cout << "R" << std::flush; + + //lazy initialization of the output buffers + mFeaturesRandom.Init(mCachesize,mFeatures.Cols()); + mDesiredRandom.Init(mCachesize,mDesired.Cols()); + + //generate random series of integers + Vector randmask(mIntakePos); + for(unsigned int i=0; i& rFeatures, Matrix& rDesired) + { + if(mState == EMPTY) { + Error("GetBunch on empty cache!!!"); + } + + //change state if full... 
+ if(mState == FULL) { + if(mTrace&3) std::cout << "\\" << std::flush; + mState = EXHAUST; mExhaustPos = 0; + } + + //final cache is not completely filled + if(mState == INTAKE) { + if(mTrace&3) std::cout << "\\-LAST_CACHE\n" << std::flush; + mState = EXHAUST; mExhaustPos = 0; + } + + assert(mState == EXHAUST); + + //init the output + if(rFeatures.Rows()!=mBunchsize || rFeatures.Cols()!=mFeatures.Cols()) { + rFeatures.Init(mBunchsize,mFeatures.Cols()); + } + if(rDesired.Rows()!=mBunchsize || rDesired.Cols()!=mDesired.Cols()) { + rDesired.Init(mBunchsize,mDesired.Cols()); + } + + //copy the output + if(mRandomized) { + memcpy(rFeatures.pData(), + mFeaturesRandom.pData()+mExhaustPos*mFeatures.Stride(), + rFeatures.MSize()); + + memcpy(rDesired.pData(), + mDesiredRandom.pData()+mExhaustPos*mDesired.Stride(), + rDesired.MSize()); + } else { + memcpy(rFeatures.pData(), + mFeatures.pData()+mExhaustPos*mFeatures.Stride(), + rFeatures.MSize()); + + memcpy(rDesired.pData(), + mDesired.pData()+mExhaustPos*mDesired.Stride(), + rDesired.MSize()); + } + + + //update cursor + mExhaustPos += mBunchsize; + + //change state to EMPTY + if(mExhaustPos > mIntakePos-mBunchsize) { + //we don't have more complete bunches... + mDiscarded += mIntakePos - mExhaustPos; + + mState = EMPTY; + } + } + + +} diff --git a/src/TNetLib/.svn/text-base/Cache.h.svn-base b/src/TNetLib/.svn/text-base/Cache.h.svn-base new file mode 100644 index 0000000..800d92c --- /dev/null +++ b/src/TNetLib/.svn/text-base/Cache.h.svn-base @@ -0,0 +1,74 @@ +#ifndef _CUCACHE_H_ +#define _CUCACHE_H_ + +#include "Matrix.h" + +namespace TNet { + + + /** + * The feature-target pair cache + */ + class Cache { + typedef enum { EMPTY, INTAKE, FULL, EXHAUST } State; + public: + Cache(); + ~Cache(); + + /// Initialize the cache + void Init(size_t cachesize, size_t bunchsize, long int seed = 0); + + /// Add data to cache, returns number of added vectors + void AddData(const Matrix& rFeatures, const Matrix& rDesired); + /// Randomizes the cache + void Randomize(); + /// Get the bunch of training data + void GetBunch(Matrix& rFeatures, Matrix& rDesired); + + + /// Returns true if the cache was completely filled + bool Full() + { return (mState == FULL); } + + /// Returns true if the cache is empty + bool Empty() + { return (mState == EMPTY || mIntakePos < mBunchsize); } + + /// Number of discarded frames + int Discarded() + { return mDiscarded; } + + /// Set the trace message level + void Trace(int trace) + { mTrace = trace; } + + private: + + static long int GenerateRandom(int max) + { return lrand48() % max; } + + State mState; ///< Current state of the cache + + size_t mIntakePos; ///< Number of intaken vectors by AddData + size_t mExhaustPos; ///< Number of exhausted vectors by GetBunch + + size_t mCachesize; ///< Size of cache + size_t mBunchsize; ///< Size of bunch + int mDiscarded; ///< Number of discarded frames + + Matrix mFeatures; ///< Feature cache + Matrix mFeaturesRandom; ///< Feature cache + Matrix mFeaturesLeftover; ///< Feature cache + + Matrix mDesired; ///< Desired vector cache + Matrix mDesiredRandom; ///< Desired vector cache + Matrix mDesiredLeftover; ///< Desired vector cache + + bool mRandomized; + + int mTrace; + }; + +} + +#endif diff --git a/src/TNetLib/.svn/text-base/Component.h.svn-base b/src/TNetLib/.svn/text-base/Component.h.svn-base new file mode 100644 index 0000000..762451e --- /dev/null +++ b/src/TNetLib/.svn/text-base/Component.h.svn-base @@ -0,0 +1,387 @@ +#ifndef _NETWORK_COMPONENT_I_H +#define _NETWORK_COMPONENT_I_H + + 
+#include "Vector.h" +#include "Matrix.h" + +#include +#include + + +namespace TNet { + + + /** + * Basic element of the network, + * it is a box with defined inputs and outputs, + * and functions to refresh outputs + * + * it is able to compute tranformation function (forward pass) + * and jacobian function (backward pass), + * which is to be implemented in descendents + */ + class Component + { + public: + /// Types of the net components + typedef enum { + UPDATABLE_COMPONENT = 0x0100, + BIASED_LINEARITY, + SHARED_LINEARITY, + + ACT_FUN = 0x0200, + SOFTMAX, + SIGMOID, + BLOCK_SOFTMAX, + + OTHER = 0x0400, + EXPAND, + COPY, + TRANSPOSE, + BLOCK_LINEARITY, + WINDOW, + BIAS, + LOG, + + BLOCK_ARRAY, + } ComponentType; + + + ////////////////////////////////////////////////////////////// + // Constructor & Destructor + public: + Component(size_t nInputs, size_t nOutputs, Component *pPred); + virtual ~Component(); + + ////////////////////////////////////////////////////////////// + // Interface specification (public) + public: + /// Get Type Identification of the component + virtual ComponentType GetType() const = 0; + /// Get Type Label of the component + virtual const char* GetName() const = 0; + /// + virtual bool IsUpdatable() const + { return false; } + /// Clone the component + virtual Component* Clone() const = 0; + + /// Get size of input vectors + size_t GetNInputs() const; + /// Get size of output vectors + size_t GetNOutputs() const; + + /// IO Data getters + const Matrix& GetInput() const; + const Matrix& GetOutput() const; + const Matrix& GetErrorInput() const; + const Matrix& GetErrorOutput() const; + + /// Set input vector (bind with the preceding NetworkComponent) + void SetInput(const Matrix& rInput); + /// Set error input vector (bind with the following NetworkComponent) + void SetErrorInput(const Matrix& rErrorInput); + + /// Perform forward pass propagateion Input->Output + void Propagate(); + /// Perform backward pass propagateion ErrorInput->ErrorOutput + void Backpropagate(); + + /// Reads the component parameters from stream + virtual void ReadFromStream(std::istream& rIn) { } + /// Writes the components parameters to stream + virtual void WriteToStream(std::ostream& rOut) { } + + + /////////////////////////////////////////////////////////////// + // Nonpublic member functions used to update data outputs + protected: + /// Forward pass transformation (to be implemented by descendents...) + virtual void PropagateFnc(const Matrix& X, Matrix& Y) = 0; + /// Backward pass transformation (to be implemented by descendents...) 
+ virtual void BackpropagateFnc(const Matrix& X, Matrix& Y) = 0; + + + /////////////////////////////////////////////////////////////// + // data members + protected: + + size_t mNInputs; ///< Size of input vectors + size_t mNOutputs; ///< Size of output vectors + + const Matrix* mpInput; ///< inputs are NOT OWNED by component + const Matrix* mpErrorInput;///< inputs are NOT OWNED by component + + Matrix mOutput; ///< outputs are OWNED by component + Matrix mErrorOutput; ///< outputs are OWNED by component + + }; + + + /** + * Class UpdatableComponent is a box which has some + * parameters adjustable by learning + * + * you can set the learning rate, lock the params, + * and learn from each data observation + */ + class UpdatableComponent : public Component + { + ////////////////////////////////////////////////////////////// + // Constructor & Destructor + public: + UpdatableComponent(size_t nInputs, size_t nOutputs, Component *pPred); + virtual ~UpdatableComponent(); + + + ////////////////////////////////////////////////////////////// + // Interface specification (public) + public: + /// + virtual bool IsUpdatable() const + { return true; } + + /// calculate gradient + virtual void Gradient() = 0; + /// accumulate gradient from other components + virtual void AccuGradient(const UpdatableComponent& src, int thr, int thrN) = 0; + /// update weights, reset the accumulator + virtual void Update(int thr, int thrN) = 0; + + /// Sets the learning rate of gradient descent + void LearnRate(BaseFloat rate); + /// Gets the learning rate of gradient descent + BaseFloat LearnRate() const; + + void Momentum(BaseFloat mmt); + BaseFloat Momentum() const ; + + void Weightcost(BaseFloat cost); + BaseFloat Weightcost() const; + + void Bunchsize(size_t size); + size_t Bunchsize() const; + + protected: + BaseFloat mLearningRate; + BaseFloat mMomentum; + BaseFloat mWeightcost; + size_t mBunchsize; + }; + + + + + ////////////////////////////////////////////////////////////////////////// + // INLINE FUNCTIONS + // Component:: + inline + Component:: + Component(size_t nInputs, size_t nOutputs, Component *pPred) + : mNInputs(nInputs), mNOutputs(nOutputs), + mpInput(NULL), mpErrorInput(NULL), + mOutput(), mErrorOutput() + { + /* DOUBLE LINK the Components */ + if (pPred != NULL) { + SetInput(pPred->GetOutput()); + pPred->SetErrorInput(GetErrorOutput()); + } + } + + + inline + Component:: + ~Component() + { + ; + } + + inline void + Component:: + Propagate() + { + //initialize output buffer + if(mOutput.Rows() != GetInput().Rows() || mOutput.Cols() != GetNOutputs()) { + mOutput.Init(GetInput().Rows(),GetNOutputs()); + } + //do the dimensionality test + if(GetNInputs() != GetInput().Cols()) { + KALDI_ERR << "Non-matching INPUT dim!!! 
Network dim: " << GetNInputs() + << " Data dim: " << GetInput().Cols(); + } + //run transform + PropagateFnc(GetInput(),mOutput); + + } + + + inline void + Component:: + Backpropagate() + { + //re-initialize the output buffer + if(mErrorOutput.Rows() != GetErrorInput().Rows() || mErrorOutput.Cols() != GetNInputs()) { + mErrorOutput.Init(GetErrorInput().Rows(),GetNInputs()); + } + + //do the dimensionality test + assert(GetErrorInput().Cols() == mNOutputs); + assert(mErrorOutput.Cols() == mNInputs); + assert(mErrorOutput.Rows() == GetErrorInput().Rows()); + + //transform + BackpropagateFnc(GetErrorInput(),mErrorOutput); + + } + + + inline void + Component:: + SetInput(const Matrix& rInput) + { + mpInput = &rInput; + } + + + inline void + Component:: + SetErrorInput(const Matrix& rErrorInput) + { + mpErrorInput = &rErrorInput; + } + + + inline const Matrix& + Component:: + GetInput() const + { + if (NULL == mpInput) Error("mpInput is NULL"); + return *mpInput; + } + + inline const Matrix& + Component:: + GetOutput() const + { + return mOutput; + } + + inline const Matrix& + Component:: + GetErrorInput() const + { + if (NULL == mpErrorInput) Error("mpErrorInput is NULL"); + return *mpErrorInput; + } + + inline const Matrix& + Component:: + GetErrorOutput() const + { + return mErrorOutput; + } + + inline size_t + Component:: + GetNInputs() const + { + return mNInputs; + } + + inline size_t + Component:: + GetNOutputs() const + { + return mNOutputs; + } + + + + ////////////////////////////////////////////////////////////////////////// + // INLINE FUNCTIONS + // UpdatableComponent:: + + inline + UpdatableComponent:: + UpdatableComponent(size_t nInputs, size_t nOutputs, Component *pPred) + : Component(nInputs, nOutputs, pPred), + mLearningRate(0.0), mMomentum(0.0), mWeightcost(0.0), mBunchsize(0) + { + ; + } + + + inline + UpdatableComponent:: + ~UpdatableComponent() + { + ; + } + + + inline void + UpdatableComponent:: + LearnRate(BaseFloat rate) + { + mLearningRate = rate; + } + + inline BaseFloat + UpdatableComponent:: + LearnRate() const + { + return mLearningRate; + } + + + inline void + UpdatableComponent:: + Momentum(BaseFloat mmt) + { + mMomentum = mmt; + } + + inline BaseFloat + UpdatableComponent:: + Momentum() const + { + return mMomentum; + } + + + inline void + UpdatableComponent:: + Weightcost(BaseFloat cost) + { + mWeightcost = cost; + } + + inline BaseFloat + UpdatableComponent:: + Weightcost() const + { + return mWeightcost; + } + + + inline void + UpdatableComponent:: + Bunchsize(size_t size) + { + mBunchsize = size; + } + + inline size_t + UpdatableComponent:: + Bunchsize() const + { + return mBunchsize; + } + + +} // namespace TNet + + +#endif diff --git a/src/TNetLib/.svn/text-base/Makefile.svn-base b/src/TNetLib/.svn/text-base/Makefile.svn-base new file mode 100644 index 0000000..58ff988 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Makefile.svn-base @@ -0,0 +1,29 @@ + +include ../tnet.mk + +INCLUDE = -I. 
-I../KaldiLib -I../STKLib/ + +all: libTNetLib.a + +libTNetLib.a: $(OBJ) + $(AR) ruv $@ $(OBJ) + $(RANLIB) $@ + +%.o : %.cc + $(CXX) -o $@ -c $< $(CFLAGS) $(CXXFLAGS) $(INCLUDE) + + + +.PHONY: clean doc depend +clean: + rm -f *.o *.a + +doc: + doxygen ../../doc/doxyfile_TNetLib + +depend: + $(CXX) -M $(CXXFLAGS) *.cc $(INCLUDE) > .depend.mk + +-include .depend.mk + + diff --git a/src/TNetLib/.svn/text-base/Mutex.cc.svn-base b/src/TNetLib/.svn/text-base/Mutex.cc.svn-base new file mode 100644 index 0000000..4ec956a --- /dev/null +++ b/src/TNetLib/.svn/text-base/Mutex.cc.svn-base @@ -0,0 +1,48 @@ + +#include +#include + +#include "Error.h" +#include "Mutex.h" + +namespace TNet { + + +Mutex::Mutex() { + if(0 != pthread_mutex_init(&mutex_,NULL)) + KALDI_ERR << "Cannot initialize mutex"; +} + + +Mutex::~Mutex() { + if(0 != pthread_mutex_destroy(&mutex_)) + KALDI_ERR << "Cannot destroy mutex"; +} + + +void Mutex::Lock() { + if(0 != pthread_mutex_lock(&mutex_)) + KALDI_ERR << "Error on locking mutex"; +} + + +bool Mutex::TryLock() { + int ret = pthread_mutex_lock(&mutex_); + switch (ret) { + case 0: return true; + case EBUSY: return false; + default: KALDI_ERR << "Error on try-locking mutex"; + } + return 0;//make compiler not complain +} + + +void Mutex::Unlock() { + if(0 != pthread_mutex_unlock(&mutex_)) + KALDI_ERR << "Error on unlocking mutex"; +} + + + +}//namespace TNet + diff --git a/src/TNetLib/.svn/text-base/Mutex.h.svn-base b/src/TNetLib/.svn/text-base/Mutex.h.svn-base new file mode 100644 index 0000000..ae2cfff --- /dev/null +++ b/src/TNetLib/.svn/text-base/Mutex.h.svn-base @@ -0,0 +1,34 @@ + +#include + +namespace TNet { + +/** + * This class encapsulates mutex to ensure + * exclusive access to some critical section + * which manipulates shared resources. + * + * The mutex must be unlocked from the + * SAME THREAD which locked it + */ +class Mutex { + public: + Mutex(); + ~Mutex(); + + void Lock(); + + /** + * Try to lock the mutex without waiting for it. 
+ * Returns: true when lock successfull, + * false when mutex was already locked + */ + bool TryLock(); + + void Unlock(); + + private: + pthread_mutex_t mutex_; +}; + +} //namespace TNet diff --git a/src/TNetLib/.svn/text-base/Nnet.cc.svn-base b/src/TNetLib/.svn/text-base/Nnet.cc.svn-base new file mode 100644 index 0000000..4b364ac --- /dev/null +++ b/src/TNetLib/.svn/text-base/Nnet.cc.svn-base @@ -0,0 +1,360 @@ + +#include +//#include +#include + +#include "Nnet.h" +#include "CRBEDctFeat.h" +#include "BlockArray.h" + +namespace TNet { + + + + +void Network::Feedforward(const Matrix& in, Matrix& out, + size_t start_frm_ext, size_t end_frm_ext) { + //empty network: copy input to output + if(mNnet.size() == 0) { + if(out.Rows() != in.Rows() || out.Cols() != in.Cols()) { + out.Init(in.Rows(),in.Cols()); + } + out.Copy(in); + return; + } + + //short input: propagate in one block + if(in.Rows() < 5000) { + Propagate(in,out); + } else {//long input: propagate per parts + //initialize + out.Init(in.Rows(),GetNOutputs()); + Matrix tmp_in, tmp_out; + int done=0, block=1024; + //propagate first part + tmp_in.Init(block+end_frm_ext,in.Cols()); + tmp_in.Copy(in.Range(0,block+end_frm_ext,0,in.Cols())); + Propagate(tmp_in,tmp_out); + out.Range(0,block,0,tmp_out.Cols()).Copy( + tmp_out.Range(0,block,0,tmp_out.Cols()) + ); + done += block; + //propagate middle parts + while((done+2*block) < in.Rows()) { + tmp_in.Init(block+start_frm_ext+end_frm_ext,in.Cols()); + tmp_in.Copy(in.Range(done-start_frm_ext, block+start_frm_ext+end_frm_ext, 0,in.Cols())); Propagate(tmp_in,tmp_out); + out.Range(done,block,0,tmp_out.Cols()).Copy( + tmp_out.Range(start_frm_ext,block,0,tmp_out.Cols()) + ); + done += block; + } + //propagate last part + tmp_in.Init(in.Rows()-done+start_frm_ext,in.Cols()); + tmp_in.Copy(in.Range(done-start_frm_ext,in.Rows()-done+start_frm_ext,0,in.Cols())); + Propagate(tmp_in,tmp_out); + out.Range(done,out.Rows()-done,0,out.Cols()).Copy( + tmp_out.Range(start_frm_ext,tmp_out.Rows()-start_frm_ext,0,tmp_out.Cols()) + ); + + done += tmp_out.Rows()-start_frm_ext; + assert(done == out.Rows()); + } +} + + +void Network::Propagate(const Matrix& in, Matrix& out) { + //empty network: copy input to output + if(mNnet.size() == 0) { + if(out.Rows() != in.Rows() || out.Cols() != in.Cols()) { + out.Init(in.Rows(),in.Cols()); + } + out.Copy(in); + return; + } + + //this will keep pointer to matrix 'in', for backprop + mNnet.front()->SetInput(in); + + //propagate + LayeredType::iterator it; + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + (*it)->Propagate(); + } + + //copy the output matrix + const Matrix& mat = mNnet.back()->GetOutput(); + if(out.Rows() != mat.Rows() || out.Cols() != mat.Cols()) { + out.Init(mat.Rows(),mat.Cols()); + } + out.Copy(mat); + +} + + +void Network::Backpropagate(const Matrix& globerr) { + //pass matrix to last component + mNnet.back()->SetErrorInput(globerr); + + // back-propagation : reversed order, + LayeredType::reverse_iterator it; + for(it=mNnet.rbegin(); it!=mNnet.rend(); ++it) { + //first component does not backpropagate error (no predecessors) + if(*it != mNnet.front()) { + (*it)->Backpropagate(); + } + //compute gradient if updatable component + if((*it)->IsUpdatable()) { + UpdatableComponent& comp = dynamic_cast(**it); + comp.Gradient(); //compute gradient + } + } +} + + +void Network::AccuGradient(const Network& src, int thr, int thrN) { + LayeredType::iterator it; + LayeredType::const_iterator it2; + + for(it=mNnet.begin(), it2=src.mNnet.begin(); it!=mNnet.end(); 
++it,++it2) { + if((*it)->IsUpdatable()) { + UpdatableComponent& comp = dynamic_cast(**it); + const UpdatableComponent& comp2 = dynamic_cast(**it2); + comp.AccuGradient(comp2,thr,thrN); + } + } +} + + +void Network::Update(int thr, int thrN) { + LayeredType::iterator it; + + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + if((*it)->IsUpdatable()) { + UpdatableComponent& comp = dynamic_cast(**it); + comp.Update(thr,thrN); + } + } +} + + +Network* Network::Clone() { + Network* net = new Network; + LayeredType::iterator it; + for(it = mNnet.begin(); it != mNnet.end(); ++it) { + //clone + net->mNnet.push_back((*it)->Clone()); + //connect network + if(net->mNnet.size() > 1) { + Component* last = *(net->mNnet.end()-1); + Component* prev = *(net->mNnet.end()-2); + last->SetInput(prev->GetOutput()); + prev->SetErrorInput(last->GetErrorOutput()); + } + } + + //copy the learning rate + //net->SetLearnRate(GetLearnRate()); + + return net; +} + + +void Network::ReadNetwork(const char* pSrc) { + std::ifstream in(pSrc); + if(!in.good()) { + Error(std::string("Error, cannot read model: ")+pSrc); + } + ReadNetwork(in); + in.close(); +} + + + +void Network::ReadNetwork(std::istream& rIn) { + //get the network elements from a factory + Component *pComp; + while(NULL != (pComp = ComponentFactory(rIn))) + mNnet.push_back(pComp); +} + + +void Network::WriteNetwork(const char* pDst) { + std::ofstream out(pDst); + if(!out.good()) { + Error(std::string("Error, cannot write model: ")+pDst); + } + WriteNetwork(out); + out.close(); +} + + +void Network::WriteNetwork(std::ostream& rOut) { + //dump all the componetns + LayeredType::iterator it; + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + ComponentDumper(rOut, **it); + } +} + + +Component* +Network:: +ComponentFactory(std::istream& rIn) +{ + rIn >> std::ws; + if(rIn.eof()) return NULL; + + Component* pRet=NULL; + Component* pPred=NULL; + + std::string componentTag; + size_t nInputs, nOutputs; + + rIn >> std::ws; + rIn >> componentTag; + if(componentTag == "") return NULL; //nothing left in the file + + //make it lowercase + std::transform(componentTag.begin(), componentTag.end(), + componentTag.begin(), tolower); + + //the 'endblock' tag terminates the network + if(componentTag == "") return NULL; + + + if(componentTag[0] != '<' || componentTag[componentTag.size()-1] != '>') { + Error(std::string("Invalid component tag:")+componentTag); + } + + rIn >> std::ws; + rIn >> nOutputs; + rIn >> std::ws; + rIn >> nInputs; + assert(nInputs > 0 && nOutputs > 0); + + //make coupling with predecessor + if(mNnet.size() == 0) { + pPred = NULL; + } else { + pPred = mNnet.back(); + } + + //array with list of component tags + static const std::string TAGS[] = { + "", + "", + + "", + "", + "", + + "", + "", + "", + "", + "", + "", + "", + + "", + }; + + static const int n_tags = sizeof(TAGS) / sizeof(TAGS[0]); + int i = 0; + for(i=0; iReadFromStream(rIn); + //return + return pRet; +} + + +void +Network:: +ComponentDumper(std::ostream& rOut, Component& rComp) +{ + //use tags of all the components; or the identification codes + //array with list of component tags + static const Component::ComponentType TYPES[] = { + Component::BIASED_LINEARITY, + Component::SHARED_LINEARITY, + + Component::SIGMOID, + Component::SOFTMAX, + Component::BLOCK_SOFTMAX, + + Component::EXPAND, + Component::COPY, + Component::TRANSPOSE, + Component::BLOCK_LINEARITY, + Component::BIAS, + Component::WINDOW, + Component::LOG, + + Component::BLOCK_ARRAY, + }; + static const std::string TAGS[] = { + "", + "", 
+ + "", + "", + "", + + "", + "", + "", + "", + "", + "", + "", + + "", + }; + static const int MAX = sizeof TYPES / sizeof TYPES[0]; + + int i; + for(i=0; i + + +namespace TNet { + +class Network +{ +////////////////////////////////////// +// Typedefs +typedef std::vector LayeredType; + + ////////////////////////////////////// + // Disable copy construction and assignment + private: + Network(Network&); + Network& operator=(Network&); + + public: + // allow incomplete network creation + Network() + { } + + ~Network(); + + int Layers() const + { return mNnet.size(); } + + Component& Layer(int i) + { return *mNnet[i]; } + + const Component& Layer(int i) const + { return *mNnet[i]; } + + /// Feedforward the data per blocks, this needs less memory, + /// and allows to process very long files. + /// It does not trim the *_frm_ext, but uses it + /// for concatenation of segments + void Feedforward(const Matrix& in, Matrix& out, + size_t start_frm_ext, size_t end_frm_ext); + /// forward the data to the output + void Propagate(const Matrix& in, Matrix& out); + /// backpropagate the error while calculating the gradient + void Backpropagate(const Matrix& globerr); + + /// accumulate the gradient from other networks + void AccuGradient(const Network& src, int thr, int thrN); + /// update weights, reset the accumulator + void Update(int thr, int thrN); + + Network* Clone(); ///< Clones the network + + void ReadNetwork(const char* pSrc); ///< read the network from file + void ReadNetwork(std::istream& rIn); ///< read the network from stream + void WriteNetwork(const char* pDst); ///< write network to file + void WriteNetwork(std::ostream& rOut); ///< write network to stream + + size_t GetNInputs() const; ///< Dimensionality of the input features + size_t GetNOutputs() const; ///< Dimensionality of the desired vectors + + void SetLearnRate(BaseFloat learnRate); ///< set the learning rate value + BaseFloat GetLearnRate(); ///< get the learning rate value + + void SetWeightcost(BaseFloat l2); ///< set the L2 regularization const + + void ResetBunchsize(); ///< reset the frame counter (needed for L2 regularization + void AccuBunchsize(const Network& src); ///< accumulate frame counts in bunch (needed in L2 regularization + + private: + /// Creates a component by reading from stream + Component* ComponentFactory(std::istream& In); + /// Dumps component into a stream + void ComponentDumper(std::ostream& rOut, Component& rComp); + + private: + LayeredType mNnet; ///< container with the network layers + +}; + + +////////////////////////////////////////////////////////////////////////// +// INLINE FUNCTIONS +// Network:: +inline Network::~Network() { + //delete all the components + LayeredType::iterator it; + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + delete *it; + } +} + + +inline size_t Network::GetNInputs() const { + assert(mNnet.size() > 0); + return mNnet.front()->GetNInputs(); +} + + +inline size_t +Network:: +GetNOutputs() const +{ + assert(mNnet.size() > 0); + return mNnet.back()->GetNOutputs(); +} + + + +inline void +Network:: +SetLearnRate(BaseFloat learnRate) +{ + LayeredType::iterator it; + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + if((*it)->IsUpdatable()) { + dynamic_cast(*it)->LearnRate(learnRate); + } + } +} + + +inline BaseFloat +Network:: +GetLearnRate() +{ + //TODO - learn rates may differ layer to layer + assert(mNnet.size() > 0); + for(size_t i=0; iIsUpdatable()) { + return dynamic_cast(mNnet[i])->LearnRate(); + } + } + Error("No updatable NetComponents"); + return -1; +} + + 
+inline void +Network:: +SetWeightcost(BaseFloat l2) +{ + LayeredType::iterator it; + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + if((*it)->IsUpdatable()) { + dynamic_cast(*it)->Weightcost(l2); + } + } +} + + +inline void +Network:: +ResetBunchsize() +{ + LayeredType::iterator it; + for(it=mNnet.begin(); it!=mNnet.end(); ++it) { + if((*it)->IsUpdatable()) { + dynamic_cast(*it)->Bunchsize(0); + } + } +} + +inline void +Network:: +AccuBunchsize(const Network& src) +{ + assert(Layers() == src.Layers()); + assert(Layers() > 0); + + for(int i=0; i(Layer(i)); + const UpdatableComponent& src_comp = dynamic_cast(src.Layer(i)); + tgt_comp.Bunchsize(tgt_comp.Bunchsize()+src_comp.GetOutput().Rows()); + } + } +} + + + +} //namespace + +#endif + + diff --git a/src/TNetLib/.svn/text-base/ObjFun.cc.svn-base b/src/TNetLib/.svn/text-base/ObjFun.cc.svn-base new file mode 100644 index 0000000..c899fb1 --- /dev/null +++ b/src/TNetLib/.svn/text-base/ObjFun.cc.svn-base @@ -0,0 +1,231 @@ + +#include "ObjFun.h" +#include "Error.h" + +#include + +namespace TNet { + + +ObjectiveFunction* ObjectiveFunction::Factory(ObjFunType type) { + ObjectiveFunction* ret = NULL; + switch(type) { + case MEAN_SQUARE_ERROR: ret = new MeanSquareError; break; + case CROSS_ENTROPY: ret = new CrossEntropy; break; + default: Error("Unknown ObjectiveFunction type"); + } + return ret; +} + + +/* + * MeanSquareError + */ +void MeanSquareError::Evaluate(const Matrix& net_out, const Matrix& target, Matrix* err) { + + //check dimensions + assert(net_out.Rows() == target.Rows()); + assert(net_out.Cols() == target.Cols()); + if(err->Rows() != net_out.Rows() || err->Cols() != net_out.Cols()) { + err->Init(net_out.Rows(),net_out.Cols()); + } + + //compute global gradient + err->Copy(net_out); + err->AddScaled(-1,target); + + //compute loss function + double sum = 0; + for(size_t r=0; rRows(); r++) { + for(size_t c=0; cCols(); c++) { + BaseFloat val = (*err)(r,c); + sum += val*val; + } + } + error_ += sum/2.0; + frames_ += net_out.Rows(); +} + + +std::string MeanSquareError::Report() { + std::stringstream ss; + ss << "Mse:" << error_ << " frames:" << frames_ + << " err/frm:" << error_/frames_ + << "\n"; + return ss.str(); +} + + +/* + * CrossEntropy + */ + +///Find maximum in float array +inline int FindMaxId(const BaseFloat* ptr, size_t N) { + BaseFloat mval = -1e20f; + int mid = -1; + for(size_t i=0; i mval) { + mid = i; mval = ptr[i]; + } + } + return mid; +} + + +void +CrossEntropy::Evaluate(const Matrix& net_out, const Matrix& target, Matrix* err) +{ + //check dimensions + assert(net_out.Rows() == target.Rows()); + assert(net_out.Cols() == target.Cols()); + if(err->Rows() != net_out.Rows() || err->Cols() != net_out.Cols()) { + err->Init(net_out.Rows(),net_out.Cols()); + } + + //allocate confunsion buffers + if(confusion_mode_ != NO_CONF) { + if(confusion_.Rows() != target.Cols() || confusion_.Cols() != target.Cols()) { + confusion_.Init(target.Cols(),target.Cols()); + confusion_count_.Init(target.Cols()); + diag_confusion_.Init(target.Cols()); + } + } + + //compute global gradient (assuming on softmax input) + err->Copy(net_out); + err->AddScaled(-1,target); + + //collect max values + std::vector max_target_id(target.Rows()); + std::vector max_netout_id(target.Rows()); + //check correct classification + int corr = 0; + for(size_t r=0; r tag; + { + std::ifstream ifs(output_label_map_); + assert(ifs.good()); + std::string str; + while(!ifs.eof()) { + ifs >> str; + tag.push_back(str); + } + } + assert(confusion_count_.Dim() <= tag.size()); 
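+  // The report below prints the label-vs-hypothesis confusion matrix and
+  // then one line per output class: confusion_count_[i] is the number of
+  // frames whose label was class i, diag_confusion_[i] how many of those
+  // were also hypothesized as class i, so their ratio is the per-class
+  // accuracy.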
+ + //print confusion matrix + if(confusion_mode_ == MAX_CONF || confusion_mode_ == SOFT_CONF) { + ss << "Row:label Col:hyp\n" << confusion_ << "\n"; + } + + //***print per-target accuracies + for(int i=0; i(inst); + frames_ += xent.frames_; error_ += xent.error_; corr_ += xent.corr_; + //sum the confustion statistics + if(confusion_mode_ != NO_CONF) { + if(confusion_.Rows() != xent.confusion_.Rows()) { + confusion_.Init(xent.confusion_.Rows(),xent.confusion_.Cols()); + confusion_count_.Init(xent.confusion_count_.Dim()); + diag_confusion_.Init(xent.diag_confusion_.Dim()); + } + confusion_.Add(xent.confusion_); + confusion_count_.Add(xent.confusion_count_); + diag_confusion_.Add(xent.diag_confusion_); + } +} + + +} // namespace TNet diff --git a/src/TNetLib/.svn/text-base/ObjFun.h.svn-base b/src/TNetLib/.svn/text-base/ObjFun.h.svn-base new file mode 100644 index 0000000..c458340 --- /dev/null +++ b/src/TNetLib/.svn/text-base/ObjFun.h.svn-base @@ -0,0 +1,160 @@ +#ifndef _TNET_OBJ_FUN_H +#define _TNET_OBJ_FUN_H + +#include +#include +#include + +#include "Matrix.h" +#include "Vector.h" + +namespace TNet { + + /** + * General interface for objective functions + */ + class ObjectiveFunction + { + public: + /// Enum with objective function types + typedef enum { + OBJ_FUN_I = 0x0300, + MEAN_SQUARE_ERROR, + CROSS_ENTROPY, + } ObjFunType; + + public: + /// Factory for creating objective function instances + static ObjectiveFunction* Factory(ObjFunType type); + + ////////////////////////////////////////////////////////////// + // Interface specification + protected: + ObjectiveFunction() { }; /// constructor + public: + virtual ~ObjectiveFunction() { }; /// destructor + + virtual ObjFunType GetType() = 0; + virtual const char* GetName() = 0; + virtual ObjectiveFunction* Clone() = 0; + + ///calculate error of network output + virtual void Evaluate(const Matrix& net_out, const Matrix& target, Matrix* err) = 0; + + ///get the accumulated error + virtual double GetError() = 0; + ///the number of processed frames + virtual size_t GetFrames() = 0; + + ///report the error to string + virtual std::string Report() = 0; + + ///sum the frame counts from more instances + virtual void MergeStats(const ObjectiveFunction& inst) = 0; + }; + + + + /** + * Mean square error function + */ + class MeanSquareError : public ObjectiveFunction + { + public: + MeanSquareError() + : ObjectiveFunction(), frames_(0), error_(0) + { } + + ~MeanSquareError() + { } + + ObjFunType GetType() + { return MEAN_SQUARE_ERROR; } + + const char* GetName() + { return ""; } + + ObjectiveFunction* Clone() + { return new MeanSquareError(*this); } + + void Evaluate(const Matrix& net_out, const Matrix& target, Matrix* err); + + size_t GetFrames() + { return frames_; } + + double GetError() + { return error_; } + + std::string Report(); + + void MergeStats(const ObjectiveFunction& inst) { + const MeanSquareError& mse = dynamic_cast(inst); + frames_ += mse.frames_; error_ += mse.error_; + } + + private: + size_t frames_; + double error_; + + }; + + + /** + * Cross entropy error function + */ + class CrossEntropy : public ObjectiveFunction + { + public: + enum ConfusionMode { NO_CONF=0, MAX_CONF, SOFT_CONF, DIAG_MAX_CONF, DIAG_SOFT_CONF }; + + public: + CrossEntropy() + : ObjectiveFunction(), frames_(0), error_(0), corr_(0), confusion_mode_(NO_CONF), output_label_map_(NULL) + { } + + ~CrossEntropy() + { } + + ObjFunType GetType() + { return CROSS_ENTROPY; } + + const char* GetName() + { return ""; } + + ObjectiveFunction* Clone() + { return new 
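+    // copy-construction: each training thread clones the master objective
+    // function, and the per-thread counters are folded back via MergeStats()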
CrossEntropy(*this); } + + void Evaluate(const Matrix& net_out, const Matrix& target, Matrix* err); + + size_t GetFrames() + { return frames_; } + + double GetError() + { return error_; } + + void SetConfusionMode(enum ConfusionMode m) + { confusion_mode_ = m; } + + void SetOutputLabelMap(const char* map) + { output_label_map_ = map; } + + std::string Report(); + + void MergeStats(const ObjectiveFunction& inst); + private: + size_t frames_; + double error_; + size_t corr_; + + ConfusionMode confusion_mode_; + Matrix confusion_; + Vector confusion_count_; + Vector diag_confusion_; + const char* output_label_map_; + }; + + +} //namespace TNet + + +#endif diff --git a/src/TNetLib/.svn/text-base/Platform.h.svn-base b/src/TNetLib/.svn/text-base/Platform.h.svn-base new file mode 100644 index 0000000..66ebacb --- /dev/null +++ b/src/TNetLib/.svn/text-base/Platform.h.svn-base @@ -0,0 +1,397 @@ +#ifndef _TNET_PLATFORM_H +#define _TNET_PLATFORM_H + +#include "Thread.h" +#include "Matrix.h" + +#include "Features.h" +#include "Labels.h" + +#include "Cache.h" +#include "Nnet.h" +#include "ObjFun.h" + +#include "Mutex.h" +#include "Semaphore.h" +#include "Barrier.h" +#include "Thread.h" + +#include +#include +#include + +namespace TNet { + +class PlatformThread; + +class Platform { + +/* +* Variables to be initialized directly from the main function +*/ +public: + FeatureRepository feature_; + LabelRepository label_; + + Network nnet_transf_; + Network nnet_; + ObjectiveFunction* obj_fun_; + + int bunchsize_; + int cachesize_; + bool randomize_; + + int start_frm_ext_; + int end_frm_ext_; + + int trace_; + bool crossval_; + + long int seed_; + + /* + * Variables to be used internally during the multi-threaded training + */ + private: + Semaphore semaphore_read_; + + std::vector*> > feature_buf_; + std::vector*> > label_buf_; + std::vector mutex_buf_; + + std::vector nnet_transf2_; + + std::vector cache_; + + std::vector nnet2_; + std::vector obj_fun2_; + std::vector sync_mask_; + + Barrier barrier_; + bool end_reading_; + std::vector tim_; + std::vector tim_accu_; + + int num_thr_; + Semaphore semaphore_endtrain_; + Semaphore semaphore_endtrain2_; + + public: + Mutex cout_mutex_; + + /* + * Methods + */ + public: + Platform() + : bunchsize_(0), cachesize_(0), randomize_(false), + start_frm_ext_(0), end_frm_ext_(0), trace_(0), + crossval_(false), seed_(0), + end_reading_(false), num_thr_(0) + { } + + ~Platform() + { + for(size_t i=0; i(arg); + platform_.Thread(thr_id); + } + + private: + Platform& platform_; +}; + + + + + +void Platform::RunTrain(int num_thr) { + num_thr_ = num_thr; + + /* + * Initialize parallel training + */ + feature_buf_.resize(num_thr); + label_buf_.resize(num_thr); + mutex_buf_.resize(num_thr); + cache_.resize(num_thr); + sync_mask_.resize(num_thr); + barrier_.SetThreshold(num_thr); + + tim_.resize(num_thr); + tim_accu_.resize(num_thr,0.0); + + int bunchsize = bunchsize_/num_thr; + int cachesize = (cachesize_/num_thr/bunchsize)*bunchsize; + std::cout << "Bunchsize:" << bunchsize << "*" << num_thr << "=" << bunchsize*num_thr + << " Cachesize:" << cachesize << "*" << num_thr << "=" << cachesize*num_thr << "\n"; + for(int i=0; iClone()); + //enable threads to sync weights + sync_mask_[i] = true; + } + + /* + * Run training threads + */ + std::vector threads; + for(intptr_t i=0; iStart(reinterpret_cast(i)); + threads.push_back(t); + } + + /* + * Read the training data + */ + ReadData(); + + /* + * Wait for training to finish + */ + semaphore_endtrain2_.Wait(); + +} + + + +void 
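+// Producer half of the pipeline: the main thread walks the feature list,
+// pairs each utterance's feature matrix with its desired-label matrix and
+// deals them round-robin into the per-thread buffers (guarded by
+// mutex_buf_). semaphore_read_ throttles reading once the shortest buffer
+// is sufficiently full; consumer threads Post() it when they run low.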
Platform::ReadData() try { + cout_mutex_.Lock(); + std::cout << "queuesize " << feature_.QueueSize() << "\n"; + cout_mutex_.Unlock(); + + int thr = 0; + for(feature_.Rewind();!feature_.EndOfList();feature_.MoveNext()) { + Matrix* fea = new Matrix; + Matrix* lab = new Matrix; + + feature_.ReadFullMatrix(*fea); + label_.GenDesiredMatrix(*lab, + fea->Rows()-start_frm_ext_-end_frm_ext_, + feature_.CurrentHeader().mSamplePeriod, + feature_.Current().Logical().c_str()); + + + fea->CheckData(feature_.Current().Logical()); + + mutex_buf_[thr].Lock(); + feature_buf_[thr].push_back(fea); + label_buf_[thr].push_back(lab); + mutex_buf_[thr].Unlock(); + + //suspend reading when shortest buffer has 50 matrices + if(thr == 0) { + int minsize=1e6; + for(size_t i=0; i 20) semaphore_read_.Wait(); + } + + thr = (thr+1) % num_thr_; + } + + std::cout << "[Reading finished]\n" << std::flush; + end_reading_ = true; + +} catch (std::exception& rExc) { + std::cerr << "Exception thrown" << std::endl; + std::cerr << rExc.what() << std::endl; + exit(1); +} + +void Platform::Thread(int thr_id) try { + + const int thr = thr_id; //make id const for safety! + + while(1) { + //fill the cache + while(!cache_[thr].Full() && !(end_reading_ && (feature_buf_[thr].size() == 0))) { + + if(feature_buf_[thr].size() <= 5) { + semaphore_read_.Post();//wake the reader + } + if(feature_buf_[thr].size() == 0) { + cout_mutex_.Lock(); + std::cout << "Thread" << thr << ",waiting for data\n"; + cout_mutex_.Unlock(); + sleep(1); + } else { + //get the matrices + mutex_buf_[thr].Lock(); + Matrix* fea = feature_buf_[thr].front(); + Matrix* lab = label_buf_[thr].front(); + feature_buf_[thr].pop_front(); + label_buf_[thr].pop_front(); + mutex_buf_[thr].Unlock(); + + //transform the features + Matrix fea_transf; + nnet_transf2_[thr]->Propagate(*fea,fea_transf); + + //trim the ext + SubMatrix fea_trim( + fea_transf, + start_frm_ext_, + fea_transf.Rows()-start_frm_ext_-end_frm_ext_, + 0, + fea_transf.Cols() + ); + + //add to cache + cache_[thr].AddData(fea_trim,*lab); + + delete fea; delete lab; + } + } + + //no more data, end training... 
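+  // (the break below fires once the reader has finished and this thread's
+  // buffer and cache are both empty)
+  //
+  // Each iteration of the training loop below is one synchronous SGD step
+  // per bunch: every thread propagates/backpropagates its own bunch on its
+  // private network clone, all threads meet at barrier_, the gradients are
+  // summed into the master network, and the weights are updated in
+  // thread-disjoint row ranges (see SharedLinearity::AccuGradient/Update).
+  // With crossval_ set, the backward pass and update are skipped and only
+  // objective-function statistics are collected.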
+ if(cache_[thr].Empty()) break; + + if(randomize_) { cache_[thr].Randomize(); } + + + //std::cout << "Thread" << thr << ", Cache#" << nr_cache++ << "\n"; + + //train from cache + Matrix fea2,lab2,out,err; + while(!cache_[thr].Empty()) { + cache_[thr].GetBunch(fea2,lab2); + nnet2_[thr]->Propagate(fea2,out); + obj_fun2_[thr]->Evaluate(out,lab2,&err); + + if(!crossval_) { + nnet2_[thr]->Backpropagate(err); + + tim_[thr].Start(); + barrier_.Wait();//*********/ + tim_[thr].End(); tim_accu_[thr] += tim_[thr].Val(); + + //sum the gradient and bunchsize + for(int i=0; iMergeStats(*obj_fun2_[i]); + } + + cout_mutex_.Lock(); + std::cout << "Barrier waiting times per thread\n"; + std::copy(tim_accu_.begin(),tim_accu_.end(),std::ostream_iterator(std::cout," ")); + std::cout << "\n"; + cout_mutex_.Unlock(); + } + + cout_mutex_.Lock(); + std::cout << "[Thread" << thr << " finished]\n"; + cout_mutex_.Unlock(); + + if(thr == 0) { + semaphore_endtrain2_.Post(); + } +} catch (std::exception& rExc) { + std::cerr << "Exception thrown" << std::endl; + std::cerr << rExc.what() << std::endl; + exit(1); +} + + + +}//namespace TNet + +#endif diff --git a/src/TNetLib/.svn/text-base/Semaphore.cc.svn-base b/src/TNetLib/.svn/text-base/Semaphore.cc.svn-base new file mode 100644 index 0000000..d149fb3 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Semaphore.cc.svn-base @@ -0,0 +1,64 @@ + +#include "Semaphore.h" + +namespace TNet { + + Semaphore:: + Semaphore(int initValue) + { + mSemValue = initValue; + pthread_mutex_init(&mMutex, NULL); + pthread_cond_init(&mCond, NULL); + } + + Semaphore:: + ~Semaphore() + { + pthread_mutex_destroy(&mMutex); + pthread_cond_destroy(&mCond); + } + + int + Semaphore:: + TryWait() + { + pthread_mutex_lock(&mMutex); + if(mSemValue > 0) { + mSemValue--; + pthread_mutex_unlock(&mMutex); + return 0; + } + pthread_mutex_unlock(&mMutex); + return -1; + } + + void + Semaphore:: + Wait() + { + pthread_mutex_lock(&mMutex); + while(mSemValue <= 0) { + pthread_cond_wait(&mCond, &mMutex); + } + mSemValue--; + pthread_mutex_unlock(&mMutex); + } + + void + Semaphore:: + Post() + { + pthread_mutex_lock(&mMutex); + mSemValue++; + pthread_cond_signal(&mCond); + pthread_mutex_unlock(&mMutex); + } + + int + Semaphore:: + GetValue() + { return mSemValue; } + + + +} //namespace diff --git a/src/TNetLib/.svn/text-base/Semaphore.h.svn-base b/src/TNetLib/.svn/text-base/Semaphore.h.svn-base new file mode 100644 index 0000000..a28ee44 --- /dev/null +++ b/src/TNetLib/.svn/text-base/Semaphore.h.svn-base @@ -0,0 +1,26 @@ +#ifndef _SEMPAHORE_H_ +#define _SEMPAHORE_H_ + +#include + +namespace TNet { + + class Semaphore { + public: + Semaphore(int initValue = 0); + ~Semaphore(); + + int TryWait(); + void Wait(); + void Post(); + int GetValue(); + + private: + int mSemValue; + pthread_mutex_t mMutex; + pthread_cond_t mCond; + + }; +} //namespace + +#endif diff --git a/src/TNetLib/.svn/text-base/SharedLinearity.cc.svn-base b/src/TNetLib/.svn/text-base/SharedLinearity.cc.svn-base new file mode 100644 index 0000000..108212c --- /dev/null +++ b/src/TNetLib/.svn/text-base/SharedLinearity.cc.svn-base @@ -0,0 +1,277 @@ + + +#include "SharedLinearity.h" +#include "cblas.h" + +namespace TNet { + +void +SharedLinearity:: +PropagateFnc(const Matrix& X, Matrix& Y) +{ + //precopy bias + for(int k=0; kDim(),mpBias->pData(),mpBias->Dim()*sizeof(BaseFloat)); + } + } + + //multiply blockwise + for(int k=0; k xblock(X,0,X.Rows(),k*mpLinearity->Rows(),mpLinearity->Rows()); + SubMatrix 
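+    // one GEMM per instance: the k-th column block of Y gets the k-th
+    // column block of X times the single shared weight matrix, added on
+    // top of the bias that was just copied in (beta = 1.0)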
yblock(Y,0,Y.Rows(),k*mpLinearity->Cols(),mpLinearity->Cols()); + yblock.BlasGemm(1.0,xblock,NO_TRANS,*mpLinearity,NO_TRANS,1.0); + } +} + + +void +SharedLinearity:: +BackpropagateFnc(const Matrix& X, Matrix& Y) +{ + for(int k=0; k xblock(X,0,X.Rows(),k*mpLinearity->Cols(),mpLinearity->Cols()); + SubMatrix yblock(Y,0,Y.Rows(),k*mpLinearity->Rows(),mpLinearity->Rows()); + yblock.BlasGemm(1.0,xblock,NO_TRANS,*mpLinearity,TRANS,1.0); + } +} + +#if 0 +void +SharedLinearity:: +AccuUpdate() +{ + BaseFloat N = 1; + /* + //Not part of the interface!!! + if(mGradDivFrm) { + N = static_cast(GetInput().Rows()); + } + */ + BaseFloat mmt_gain = static_cast(1.0/(1.0-mMomentum)); + N *= mmt_gain; //compensate higher gradient estimates due to momentum + + //compensate augmented dyn. range of gradient caused by multiple instances + N *= static_cast(mNInstances); + + const Matrix& X = GetInput().Data(); + const Matrix& E = GetErrorInput().Data(); + //get gradient of shared linearity + for(int k=0; k xblock(X,0,X.Rows(),k*mLinearity.Rows(),mLinearity.Rows()); + SubMatrix eblock(E,0,E.Rows(),k*mLinearity.Cols(),mLinearity.Cols()); + mLinearityCorrection.BlasGemm(1.0,xblock,TRANS,eblock,NO_TRANS,((k==0)?mMomentum:1.0f)); + } + + //get gradient of shared bias + mBiasCorrection.Scale(mMomentum); + for(int r=0; r> std::ws >> mNInstances; + if(mNInstances < 1) { + std::ostringstream os; + os << "Bad number of instances:" << mNInstances; + Error(os.str()); + } + if(GetNInputs() % mNInstances != 0 || GetNOutputs() % mNInstances != 0) { + std::ostringstream os; + os << "Number of Inputs/Outputs must be divisible by number of instances" + << " Inputs:" << GetNInputs() + << " Outputs" << GetNOutputs() + << " Intances:" << mNInstances; + Error(os.str()); + } + + //matrix is stored transposed as SNet does + BfMatrix transpose; + rIn >> transpose; + mLinearity = BfMatrix(transpose, TRANS); + //biases stored normally + rIn >> mBias; + + if(transpose.Cols()*transpose.Rows() == 0) { + Error("Missing linearity matrix in network file"); + } + if(mBias.Dim() == 0) { + Error("Missing bias vector in network file"); + } + + + if(mLinearity.Cols() != (GetNOutputs() / mNInstances) || + mLinearity.Rows() != (GetNInputs() / mNInstances) || + mBias.Dim() != (GetNOutputs() / mNInstances) + ){ + std::ostringstream os; + os << "Wrong dimensionalities of matrix/vector in network file\n" + << "Inputs:" << GetNInputs() + << " Outputs:" << GetNOutputs() + << "\n" + << "N-Instances:" << mNInstances + << "\n" + << "linearityCols:" << mLinearity.Cols() << "(" << mLinearity.Cols()*mNInstances << ")" + << " linearityRows:" << mLinearity.Rows() << "(" << mLinearity.Rows()*mNInstances << ")" + << " biasDims:" << mBias.Dim() << "(" << mBias.Dim()*mNInstances << ")" + << "\n"; + Error(os.str()); + } + + mLinearityCorrection.Init(mLinearity.Rows(),mLinearity.Cols()); + mBiasCorrection.Init(mBias.Dim()); +} + + +void +SharedLinearity:: +WriteToStream(std::ostream& rOut) +{ + rOut << mNInstances << std::endl; + //matrix is stored transposed as SNet does + BfMatrix transpose(mLinearity, TRANS); + rOut << transpose; + //biases stored normally + rOut << mBias; + rOut << std::endl; +} + + +void +SharedLinearity:: +Gradient() +{ + const Matrix& X = GetInput(); + const Matrix& E = GetErrorInput(); + //get gradient of shared linearity + for(int k=0; k xblock(X,0,X.Rows(),k*mpLinearity->Rows(),mpLinearity->Rows()); + SubMatrix eblock(E,0,E.Rows(),k*mpLinearity->Cols(),mpLinearity->Cols()); + 
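+    // dW = sum_k X_k^T * E_k over all instances; beta is 0.0 for k==0 to
+    // overwrite the previous bunch's gradient, then 1.0 to accumulate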
mLinearityCorrection.BlasGemm(1.0,xblock,TRANS,eblock,NO_TRANS,((k==0)?0.0f:1.0f)); + } + + //get gradient of shared bias + mBiasCorrection.Set(0.0f); + for(int r=0; r(src); + + //allocate accumulators when needed + if(mLinearityCorrectionAccu.MSize() == 0) { + mLinearityCorrectionAccu.Init(mpLinearity->Rows(),mpLinearity->Cols()); + } + if(mBiasCorrectionAccu.MSize() == 0) { + mBiasCorrectionAccu.Init(mpBias->Dim()); + } + + + //assert the dimensions + /* + assert(mLinearityCorrection.Rows() == src_comp.mLinearityCorrection.Rows()); + assert(mLinearityCorrection.Cols() == src_comp.mLinearityCorrection.Cols()); + assert(mBiasCorrection.Dim() == src_comp.mBiasCorrection.Dim()); + */ + + //need to find out which rows to sum... + int div = mLinearityCorrection.Rows() / thrN; + int mod = mLinearityCorrection.Rows() % thrN; + + int origin = thr * div + ((mod > thr)? thr : mod); + int rows = div + ((mod > thr)? 1 : 0); + + //std::cout << "[S" << thr << "," << origin << "," << rows << "]" << std::flush; + + //create the matrix windows + const SubMatrix src_mat ( + src_comp.mLinearityCorrection, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + SubMatrix tgt_mat ( + mLinearityCorrectionAccu, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + //sum the rows + Add(tgt_mat,src_mat); + + //first thread will always sum the bias correction and adds frame count + if(thr == 0) { + //std::cout << "[BS" << thr << "]" << std::flush; + Add(mBiasCorrectionAccu,src_comp.mBiasCorrection); + } +} + + +void +SharedLinearity:: +Update(int thr, int thrN) +{ + //need to find out which rows to sum... + int div = mLinearity.Rows() / thrN; + int mod = mLinearity.Rows() % thrN; + + int origin = thr * div + ((mod > thr)? thr : mod); + int rows = div + ((mod > thr)? 1 : 0); + + //std::cout << "[P" << thr << "," << origin << "," << rows << "]" << std::flush; + + //get the matrix windows + SubMatrix src_mat ( + mLinearityCorrectionAccu, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + SubMatrix tgt_mat ( + mLinearity, + origin, rows, + 0, mLinearityCorrection.Cols() + ); + + //TODO perform L2 regularization + //tgt_mat.AddScaled(tgt_mat, -mWeightcost * num_frames); + + //update weights + AddScaled(tgt_mat, src_mat, -mLearningRate/static_cast(mNInstances)); + + //first thread always update bias + if(thr == 0) { + //std::cout << "[" << thr << "BP]" << std::flush; + AddScaled(mBias, mBiasCorrectionAccu, -mLearningRate/static_cast(mNInstances)); + } + + //reset the accumulators + src_mat.Zero(); + if(thr == 0) { + mBiasCorrectionAccu.Zero(); + } +} + + +} //namespace diff --git a/src/TNetLib/.svn/text-base/SharedLinearity.h.svn-base b/src/TNetLib/.svn/text-base/SharedLinearity.h.svn-base new file mode 100644 index 0000000..83feeee --- /dev/null +++ b/src/TNetLib/.svn/text-base/SharedLinearity.h.svn-base @@ -0,0 +1,103 @@ +#ifndef _CUSHARED_LINEARITY_H_ +#define _CUSHARED_LINEARITY_H_ + + +#include "Component.h" + +#include "Matrix.h" +#include "Vector.h" + + +namespace TNet { + +class SharedLinearity : public UpdatableComponent +{ + public: + SharedLinearity(size_t nInputs, size_t nOutputs, Component *pPred); + ~SharedLinearity(); + + ComponentType GetType() const + { return SHARED_LINEARITY; } + + const char* GetName() const + { return ""; } + + Component* Clone() const; + + void PropagateFnc(const Matrix& X, Matrix& Y); + void BackpropagateFnc(const Matrix& X, Matrix& Y); + + void ReadFromStream(std::istream& rIn); + void WriteToStream(std::ostream& rOut); + + /// calculate gradient + void Gradient(); + /// 
accumulate gradient from other components
+  void AccuGradient(const UpdatableComponent& src, int thr, int thrN);
+  /// update weights, reset the accumulator
+  void Update(int thr, int thrN);
+
+ protected:
+  Matrix<BaseFloat> mLinearity;  ///< Matrix with neuron weights
+  Vector<BaseFloat> mBias;       ///< Vector with biases
+
+  Matrix<BaseFloat>* mpLinearity;
+  Vector<BaseFloat>* mpBias;
+
+  Matrix<BaseFloat> mLinearityCorrection;  ///< Matrix for linearity updates
+  Vector<BaseFloat> mBiasCorrection;       ///< Vector for bias updates
+
+  Matrix<BaseFloat> mLinearityCorrectionAccu;  ///< Accumulator for linearity updates
+  Vector<BaseFloat> mBiasCorrectionAccu;       ///< Accumulator for bias updates
+
+  int mNInstances;
+};
+
+
+////////////////////////////////////////////////////////////////////////////
+// INLINE FUNCTIONS
+// SharedLinearity::
+inline
+SharedLinearity::
+SharedLinearity(size_t nInputs, size_t nOutputs, Component *pPred)
+  : UpdatableComponent(nInputs, nOutputs, pPred),
+    mpLinearity(&mLinearity), mpBias(&mBias),
+    mNInstances(0)
+{ }
+
+
+inline
+SharedLinearity::
+~SharedLinearity()
+{ }
+
+
+inline
+Component*
+SharedLinearity::
+Clone() const
+{
+  SharedLinearity* ptr = new SharedLinearity(GetNInputs(),GetNOutputs(),NULL);
+  ptr->mpLinearity = mpLinearity;
+  ptr->mpBias = mpBias;
+
+  ptr->mLinearityCorrection.Init(mpLinearity->Rows(),mpLinearity->Cols());
+  ptr->mBiasCorrection.Init(mpBias->Dim());
+
+  ptr->mNInstances = mNInstances;
+
+  ptr->mLearningRate = mLearningRate;
+
+  return ptr;
+}
+
+
+} //namespace
+
+#endif
diff --git a/src/TNetLib/.svn/text-base/Thread.h.svn-base b/src/TNetLib/.svn/text-base/Thread.h.svn-base
new file mode 100644
index 0000000..ba6d7ba
--- /dev/null
+++ b/src/TNetLib/.svn/text-base/Thread.h.svn-base
@@ -0,0 +1,53 @@
+#ifndef _TNET_THREAD_H
+#define _TNET_THREAD_H
+
+#include <pthread.h>
+
+namespace TNet {
+
+class Thread {
+ public:
+  Thread()
+  { }
+  virtual ~Thread()
+  { }
+
+  int Start(void* arg);
+
+ protected:
+  static void* EntryPoint(void*);
+  virtual void Execute(void*) = 0; ///< Override this function
+  void* Arg() const { return arg_; }
+  void Arg(void* a) { arg_ = a; }
+
+ private:
+  pthread_t thread_id_;
+  void* arg_;
+};
+
+int Thread::Start(void* arg) {
+  Arg(arg); // store user data
+
+  int ret=0;
+  //create thread as detached (don't wait for it)
+  pthread_attr_t tattr;
+  ret |= pthread_attr_init(&tattr);
+  ret |= pthread_attr_setdetachstate(&tattr,PTHREAD_CREATE_DETACHED);
+  ret |= pthread_create(&thread_id_, &tattr, &Thread::EntryPoint, this);
+  if(ret != 0) KALDI_ERR << "Failed to create thread";
+  return ret;
+}
+
+/*static */
+void* Thread::EntryPoint(void* pthis) try {
+  Thread* pt = (Thread*)pthis;
+  pt->Execute(pt->Arg());
+  return NULL;
+} catch (std::exception& rExc) {
+  std::cerr << "Exception thrown" << std::endl;
+  std::cerr << rExc.what() << std::endl;
+  exit(1);
+}
+
+} //namespace TNet
+
+#endif
--
cgit v1.2.3-70-g09d2
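For reference, the Thread wrapper above is meant to be subclassed with an
overridden Execute(), exactly as PlatformThread does earlier in this patch.
A minimal self-contained sketch (the Worker class and its payload are
illustrative, not part of the library; note that the pthread is created
detached, so completion is signalled through a Semaphore rather than a
join):

    #include <iostream>
    #include "Thread.h"
    #include "Semaphore.h"

    class Worker : public TNet::Thread {
     public:
      explicit Worker(TNet::Semaphore& done) : done_(done) { }
     private:
      void Execute(void* /*arg*/) {
        std::cout << "worker running\n";
        done_.Post();   // tell the main thread we are finished
      }
      TNet::Semaphore& done_;
    };

    int main() {
      TNet::Semaphore done(0);
      Worker w(done);
      w.Start(NULL);    // spawns a detached pthread running Execute()
      done.Wait();      // block until the worker posts
      return 0;
    }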