From cccccbf6cca94a3eaf813b4468453160e91c332b Mon Sep 17 00:00:00 2001
From: Joe Zhao
Date: Mon, 14 Apr 2014 08:14:45 +0800
Subject: First commit

---
 src/CuBaseLib/cumath.h | 146 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 146 insertions(+)
 create mode 100644 src/CuBaseLib/cumath.h

(limited to 'src/CuBaseLib/cumath.h')

diff --git a/src/CuBaseLib/cumath.h b/src/CuBaseLib/cumath.h
new file mode 100644
index 0000000..5680082
--- /dev/null
+++ b/src/CuBaseLib/cumath.h
@@ -0,0 +1,146 @@
+#ifndef _CUMATH_H_
+#define _CUMATH_H_
+
+#include "cumatrix.h"
+
+#include "Timer.h"
+#include "cudevice.h"
+
+namespace TNet {
+
+
+  /**
+   * Group of Math operations for the NN training
+   */
+  template<typename _ElemT>
+  class CuMath
+  {
+   public:
+
+    /// Y = Sigmoid(X)
+    static void Sigmoid(CuMatrix<_ElemT>& Y, const CuMatrix<_ElemT>& X)
+    { Error("__func__ Not implemented"); }
+
+    /// Eout = E(1-E) * Y
+    static void DiffSigmoid(CuMatrix<_ElemT>& Eout, const CuMatrix<_ElemT>& Ein, const CuMatrix<_ElemT>& Y)
+    { Error("__func__ Not implemented"); }
+
+    /// Y = Softmax(X)
+    static void Softmax(CuMatrix<_ElemT>& Y, const CuMatrix<_ElemT>& X)
+    { Error("__func__ Not implemented"); }
+
+    /// for DCT in FeaCat
+    static void BlockLinearity(CuMatrix<_ElemT>& Y, const CuMatrix<_ElemT>& X, const CuMatrix<_ElemT>& block_transf)
+    { Error("__func__ Not implemented"); }
+
+    static void Expand(CuMatrix<_ElemT>& Y, const CuMatrix<_ElemT>& X, const CuVector<int>& frameOffsets)
+    { Error("__func__ Not implemented"); }
+
+    /// ie. switch cols according to copyFrom
+    static void Rearrange(CuMatrix<_ElemT>& Y, const CuMatrix<_ElemT>& X, const CuVector<int>& copyFrom)
+    { Error("__func__ Not implemented"); }
+
+    /// ie. switch rows according to copyFrom
+    static void Randomize(CuMatrix<_ElemT>& Y, const CuMatrix<_ElemT>& X, const CuVector<int>& copyFrom)
+    { Error("__func__ Not implemented"); }
+
+    /// check match in the classification for Xentropy
+    static void CheckClass(const CuMatrix<_ElemT>& out, const CuMatrix<_ElemT> &des, CuVector<int>& match)
+    { Error("__func__ Not implemented"); }
+
+    /// gemm with offset for CuSharedLinearity
+    static void OffsetGemm(char transA, char transB, _ElemT alpha, const CuMatrix<_ElemT>& A, const CuMatrix<_ElemT>& B, _ElemT beta, CuMatrix<_ElemT>& C, int offA, int offB, int offC)
+    { Error("__func__ Not implemented"); }
+
+    /// gemv with offset for CuRecurrent
+    static void OffsetGemv(char trans, _ElemT alpha, const CuMatrix<_ElemT>& A, const _ElemT* x, size_t dimX, _ElemT beta, _ElemT* y, size_t dimY, size_t offsetY)
+    { Error("__func__ Not implemented"); }
+
+    /// ger for weight updates in CuRecurrent
+    static void BlasGer(_ElemT alpha, const _ElemT* x, size_t dimX, const _ElemT* y, size_t dimY, CuMatrix<_ElemT>& A)
+    { Error("__func__ Not implemented"); }
+
+    /// concatenate one vector several times for CuSharedLinearity
+    static void VecExpand(const CuVector<_ElemT>&in, CuVector<_ElemT>&out)
+    { Error("__func__ Not implemented"); }
+
+    /// sum the vector as if it was matrix data for CuSharedLinearity
+    static void VecAddColSum(_ElemT alpha, const CuVector<_ElemT>&in, _ElemT beta, CuVector<_ElemT>&out)
+    { Error("__func__ Not implemented"); }
+
+  }; //class CuMath::
+
+
+  //////////////////////////////////////////////////////////////////////////////
+  //// CuMath<> Template specializations (float)
+  ////
+  template<>
+  void CuMath<float>::Sigmoid(CuMatrix<float>& Y, const CuMatrix<float>& X);
+
+  template<>
+  void CuMath<float>::DiffSigmoid(CuMatrix<float>& Eout, const CuMatrix<float>& Ein, const CuMatrix<float>& Y);
+
+  template<>
+  void CuMath<float>::Softmax(CuMatrix<float>& Y, const CuMatrix<float>& X);
+
+  template<>
+  void CuMath<float>::BlockLinearity(CuMatrix<float>& Y, const CuMatrix<float>& X, const CuMatrix<float>& block_transf);
+
+  template<>
+  void CuMath<float>::Expand(CuMatrix<float>& Y, const CuMatrix<float>& X, const CuVector<int>& frameOffsets);
+
+  template<>
+  void CuMath<float>::Rearrange(CuMatrix<float>& Y, const CuMatrix<float>& X, const CuVector<int>& copyFrom);
+
+  template<>
+  void CuMath<float>::Randomize(CuMatrix<float>& Y, const CuMatrix<float>& X, const CuVector<int>& copyFrom);
+
+  template<>
+  void CuMath<float>::CheckClass(const CuMatrix<float>& out, const CuMatrix<float> &des, CuVector<int>& match);
+
+  template<>
+  void CuMath<float>::OffsetGemm(char transA, char transB, float alpha, const CuMatrix<float>& A, const CuMatrix<float>& B, float beta, CuMatrix<float>& C, int offA, int offB, int offC);
+
+  template<>
+  void CuMath<float>::OffsetGemv(char trans, float alpha, const CuMatrix<float>& A, const float* x, size_t dimX, float beta, float* y, size_t dimY, size_t offsetY);
+
+  template<>
+  void CuMath<float>::BlasGer(float alpha, const float* x, size_t dimX, const float* y, size_t dimY, CuMatrix<float>& A);
+
+  template<>
+  void CuMath<float>::VecExpand(const CuVector<float>&in, CuVector<float>&out);
+
+  template<>
+  void CuMath<float>::VecAddColSum(float alpha, const CuVector<float>&in, float beta, CuVector<float>&out);
+
+
+  //////////////////////////////////////////////////////////////////////////////
+  //// CuMath<> Template specializations (double)
+  ////
+  template<>
+  void CuMath<double>::Sigmoid(CuMatrix<double>& Y, const CuMatrix<double>& X);
+
+  template<>
+  void CuMath<double>::DiffSigmoid(CuMatrix<double>& Eout, const CuMatrix<double>& Ein, const CuMatrix<double>& Y);
+
+  template<>
+  void CuMath<double>::Softmax(CuMatrix<double>& Y, const CuMatrix<double>& X);
+
+  template<>
+  void CuMath<double>::BlockLinearity(CuMatrix<double>& Y, const CuMatrix<double>& X, const CuMatrix<double>& block_transf);
+
+  template<>
+  void CuMath<double>::Expand(CuMatrix<double>& Y, const CuMatrix<double>& X, const CuVector<int>& frameOffsets);
+
+  template<>
+  void CuMath<double>::Rearrange(CuMatrix<double>& Y, const CuMatrix<double>& X, const CuVector<int>& copyFrom);
+
+  template<>
+  void CuMath<double>::Randomize(CuMatrix<double>& Y, const CuMatrix<double>& X, const CuVector<int>& copyFrom);
+
+  template<>
+  void CuMath<double>::CheckClass(const CuMatrix<double>& out, const CuMatrix<double> &des, CuVector<int>& match);
+
+}
+
+#endif
--
cgit v1.2.3-70-g09d2