Commit b3ecba16 by liyinqiao

Update IsSameShaped function: rename the pointer-based checks to _IsSameShaped, add reference-based IsSameShaped overloads, and update call sites across the code base.

parent ddeebb47
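In short, the pointer-based shape/type checks move to underscored names, reference-based `IsSameShaped` overloads are added that forward to them, call sites are updated accordingly, and the new `utilities/SetAscendingOrder.h` header is registered. A minimal sketch of the resulting API, following the updated header further below (the forward declaration is added only so the snippet stands alone):

```cpp
namespace nts { // namespace nts(NiuTrans.Tensor)

class XTensor; // forward declaration, for this sketch only

/* pointer-based checks keep the old behavior under the new underscored
   names; they return false if either argument is NULL */
bool _IsSameShaped(const XTensor * a, const XTensor * b);
bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);

/* new reference-based overloads forward to the pointer versions, so
   callers holding XTensor objects can drop the address-of operator */
bool IsSameShaped(const XTensor & a, const XTensor & b);
bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c);

} // namespace nts(NiuTrans.Tensor)
```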
@@ -381,7 +381,7 @@ void XMathGrad::GradDiv(XTensor * node, bool isEfficient)
     XNoder::MakeGrad(a);
     XNoder::MakeGrad(b);
-    CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
+    CheckNTErrors(_IsSameShaped(a, b), "Wrong sized input tensors!");
     _Div(node->grad, b, a->grad, 1.0F);

@@ -726,7 +726,7 @@ void XMathGrad::GradMultiply(XTensor * node, bool isEfficient)
     XTensor * a = income.tails[0];
     XTensor * b = income.tails[1];
-    CheckNTErrors(IsSameShaped(a, b), "Wrong sized input tensors!");
+    CheckNTErrors(_IsSameShaped(a, b), "Wrong sized input tensors!");
     if (!isEfficient || a->isGrad) {
         XNoder::MakeGrad(a);

@@ -281,7 +281,7 @@ void XShapeGrad::GradMergeList(XTensor * node, bool isEfficient)
         smallsGrad.Add(tail->grad);
         if(i > 1){
-            CheckNTErrors(IsSameShaped(last, tail),
+            CheckNTErrors(_IsSameShaped(last, tail),
                           "Input tensors must be of the same size!");
         }

@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node)
     if(node == NULL)
         return;
-    if(!IsSameShaped(node, node->grad)){
+    if(!_IsSameShaped(node, node->grad)){
         delete node->grad;
         node->grad = NewTensor(node);
         node->grad->SetZeroAll();

@@ -319,7 +319,7 @@ void T2TSearch::Generate(T2TStateBundle * beam)
     for (int i = 0; i < indexGPU.unitNum; i++)
         indexGPU.SetInt(i * stride + indexGPU.GetInt(i), i);
-    CheckNTErrors(IsSameShaped(&prob, &probPath), "Wrong tensor shape!");
+    CheckNTErrors(IsSameShaped(prob, probPath), "Wrong tensor shape!");
     /* sequence probability of top-k candidates */
     XTensor probPathTopK;

@@ -360,7 +360,7 @@ XTensor& XTensor::operator= (const XTensor& tensor)
     XLink::ClearOutgoing(this);
     XLink::ClearIncoming(this);
-    if(!IsSameShaped(this, &tensor))
+    if(!_IsSameShaped(this, &tensor))
         Resize(tensor.order, tensor.dimSize, tensor.dataType, tensor.denseRatio);
     _Identity(&tensor, this);

@@ -93,5 +93,6 @@
 #include "utilities/XMatrixSegment.h"
 #include "utilities/FlushToMem.h"
 #include "utilities/CheckData.h"
+#include "utilities/SetAscendingOrder.h"
 #endif // __CHEADER_H__
@@ -169,7 +169,7 @@ int GetDivDimIndex(const XTensor &a, const XTensor &b)
 {
     if(a.order < b.order)
         return -1;
-    if(IsSameShaped(&a, &b))
+    if(IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -254,7 +254,7 @@ where i is the index of the item
 */
 void Div(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -57,7 +57,7 @@ void _DivDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE alp
     CheckDev(a->devID, b->devID);
-    if(IsSameShaped(a, b)){
+    if(_IsSameShaped(a, b)){
         _Div(a, b, c, alpha);
         return;
     }

@@ -189,7 +189,7 @@ i.e., a is divided with b by broadcasting
 */
 void DivDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE alpha)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -172,7 +172,7 @@ where i is the index of the element
 */
 void Mask(const XTensor &a, const XTensor &mask, XTensor &c, DTYPE alpha)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -243,9 +243,9 @@ void _MatrixMulBatchedCPU(const TensorList * a, MATRIX_TRANS_TYPE transposedA,
         XTensor * ai = (XTensor*)a->GetItem(i);
         XTensor * bi = (XTensor*)b->GetItem(i);
         XTensor * ci = (XTensor*)c->GetItem(i);
-        if (!IsSameShaped(aim, ai) ||
-            !IsSameShaped(bim, bi) ||
-            !IsSameShaped(cim, ci))
+        if (!_IsSameShaped(aim, ai) ||
+            !_IsSameShaped(bim, bi) ||
+            !_IsSameShaped(cim, ci))
         {
             isUniform = false;
             break;

@@ -37,7 +37,7 @@ int GetSumIndex(const XTensor &a, const XTensor &b)
 {
     if (a.order < b.order)
         return -1;
-    if (IsSameShaped(&a, &b))
+    if (IsSameShaped(a, b))
         return -1;
     int hitCount = 0;
@@ -170,7 +170,7 @@ int GetMultiplyDimIndex(const XTensor &a, const XTensor &b)
 {
     if(a.order < b.order)
         return -1;
-    if(IsSameShaped(&a, &b))
+    if(IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -255,7 +255,7 @@ where i is the index of the item
 */
 void Multiply(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -58,7 +58,7 @@ void _MultiplyDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYP
     CheckDev(a->devID, b->devID);
-    if(IsSameShaped(a, b)){
+    if(_IsSameShaped(a, b)){
         _Multiply(a, b, c, alpha);
         return;
     }

@@ -204,7 +204,7 @@ i.e., a is multiplied with b by broadcasting
 */
 void MultiplyDim(const XTensor &a, const XTensor &b, XTensor &c, int n)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -372,7 +372,7 @@ where some of dimensions of b can be of size 1
 */
 void MultiplyBroadcast(const XTensor &a, const XTensor &b, XTensor &c)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -150,7 +150,7 @@ int GetSubDimIndex(const XTensor &a, const XTensor &b)
 {
     if(a.order < b.order)
         return -1;
-    if(IsSameShaped(&a, &b))
+    if(IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -224,7 +224,7 @@ tensor subtraction c = a - b * \beta
 */
 void Sub(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -62,7 +62,7 @@ void _SubDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
         return;
     }
-    if (IsSameShaped(a, b)) {
+    if (_IsSameShaped(a, b)) {
         _Sub(a, b, c, beta);
         return;
     }

@@ -189,7 +189,7 @@ i.e., a is subtracted with b by broadcasting
 */
 void SubDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }
@@ -184,7 +184,7 @@ int GetSumDimIndex(const XTensor &a, const XTensor &b)
 {
     if(a.order < b.order)
         return -1;
-    if(IsSameShaped(&a, &b))
+    if(IsSameShaped(a, b))
         return -1;
     int hitCount = 0;

@@ -257,7 +257,7 @@ tensor summation c = a + b * \beta
 */
 void Sum(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -65,7 +65,7 @@ void _SumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE bet
         return;
     }
-    if(IsSameShaped(a, b)){
+    if(_IsSameShaped(a, b)){
         _Sum(a, b, c, beta);
         return;
     }

@@ -206,7 +206,7 @@ i.e., a is summed with b by broadcasting
 */
 void SumDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -375,7 +375,7 @@ c = a + b * \beta
 */
 void SumBroadcast(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
-    if (!c.isInit || !IsSameShaped(&a, &c)) {
+    if (!c.isInit || !IsSameShaped(a, c)) {
         InitTensor(&c, &a);
     }

@@ -225,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle,
         XTensor * ai = (XTensor*)a->GetItem(i);
         XTensor * bi = (XTensor*)b->GetItem(i);
         XTensor * ci = (XTensor*)c->GetItem(i);
-        if (!IsSameShaped(aim, ai) ||
-            !IsSameShaped(bim, bi) ||
-            !IsSameShaped(cim, ci))
+        if (!_IsSameShaped(aim, ai) ||
+            !_IsSameShaped(bim, bi) ||
+            !_IsSameShaped(cim, ci))
         {
             isUniform = false;
             break;
@@ -78,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
         _cudaFuncName(a, b, num); \
         return; \
     } \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same data type!"); \
     if (a->dataType == X_INT) { \
         int * d = (int*)a->data; \

@@ -113,7 +113,7 @@ void _funcName(const XTensor * a, XTensor * b, T num)
     if (a->devID >= 0) { \
         ShowNTErrors("No GPU devices support!") \
     } \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same data type!"); \
     if (a->dataType == X_INT) { \
         int * d = (int*)a->data; \

@@ -182,7 +182,7 @@ template XTensor funcName<double>(const XTensor&, double);
 template<class T> \
 void funcName(const XTensor &a, XTensor &b, T num) \
 { \
-    if (!b.isInit || !IsSameShaped(&a, &b)) { \
+    if (!b.isInit || !IsSameShaped(a, b)) { \
         InitTensor(&b, &a); \
     } \
     _funcName(&a, &b, num); \

@@ -90,7 +90,7 @@ void Kernel##funcName(T1 * a, T1 * b, int size, T2 num)
 template<class T> \
 void _Cuda##funcName(const XTensor * a, XTensor * b, T num) \
 { \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same type!"); \
     CheckNTErrors((a->isSparse == false), "TODO!"); \
     \

@@ -44,7 +44,7 @@ void _Clip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
     }
 #endif
-    CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
     DTYPE * d = (DTYPE*)a->data;

@@ -111,7 +111,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
 void Clip(const XTensor & a, XTensor & b, DTYPE lower, DTYPE upper)
 {
-    if (!b.isInit || !IsSameShaped(&a, &b)) {
+    if (!b.isInit || !IsSameShaped(a, b)) {
         InitTensor(&b, &a);
     }

@@ -75,7 +75,7 @@ set each entry to its clip value
 */
 void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
 {
-    CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((a->isSparse == false), "TODO!");
     int gridSize[3];
@@ -42,7 +42,7 @@ DTYPE myIsNotEqual(DTYPE a, DTYPE b)
 #define _SIMPLE_COMPARE_FUNCTION(_funcName, _cudaFuncName, origFunc) \
 void _funcName(const XTensor * a, XTensor * b, DTYPE number) \
 { \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same type!"); \
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \
     /* run it on GPUs */ \

@@ -59,7 +59,7 @@ void _funcName(const XTensor * a, XTensor * b, DTYPE number)
 #define _SIMPLE_COMPARE_FUNCTION(_funcName, origFunc) \
 void _funcName(const XTensor * a, XTensor * b, DTYPE number) \
 { \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same type!"); \
     CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!"); \
     /* run it on GPUs */ \

@@ -97,7 +97,7 @@ XTensor funcName(const XTensor &a, DTYPE number)
 #define SIMPLE_COMPARE_FUNCTION_VOID(funcName, _funcName, operationId) \
 void funcName(const XTensor &a, XTensor &b, DTYPE number) \
 { \
-    if (!b.isInit || !IsSameShaped(&a, &b)) { \
+    if (!b.isInit || !IsSameShaped(a, b)) { \
         InitTensor(&b, &a); \
     } \
     _funcName(&a, &b, number); \
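For orientation, here is roughly what an instantiation of the `SIMPLE_COMPARE_FUNCTION_VOID` macro above expands to; the names `Equal`, `_Equal`, and `MATH_EQUAL` are hypothetical arguments, not taken from this diff:

```cpp
// Sketch of SIMPLE_COMPARE_FUNCTION_VOID(Equal, _Equal, MATH_EQUAL);
// the bookkeeping after the _funcName call is elided, as in the hunk above.
void Equal(const XTensor &a, XTensor &b, DTYPE number)
{
    /* reference-based overload: no address-of needed on a and b */
    if (!b.isInit || !IsSameShaped(a, b)) {
        InitTensor(&b, &a);
    }

    _Equal(&a, &b, number);
}
```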
@@ -48,9 +48,9 @@ void _Normalize(const XTensor * input, XTensor * output, int dim,
                 const XTensor * a, const XTensor * b, DTYPE epsilon)
 {
     int dimRDI = input->order - dim - 1;
-    CheckNTErrors((IsSameShaped(input, output)), "Unmatched input tensors!");
-    CheckNTErrors((IsSameShaped(a, b)), "Unmatched input tensors");
-    CheckNTErrors((IsSameShaped(mean, var)), "Unmatched input tensors");
+    CheckNTErrors((_IsSameShaped(input, output)), "Unmatched input tensors!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Unmatched input tensors");
+    CheckNTErrors((_IsSameShaped(mean, var)), "Unmatched input tensors");
     CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!");
     CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!");
     CheckNTErrors((input->order == mean->order + 1), "Incorrect reduction dimension!");

@@ -204,7 +204,7 @@ void Normalize(const XTensor &input, XTensor &output, int dim,
                const XTensor &mean, const XTensor &var,
                const XTensor &a, const XTensor &b, DTYPE epsilon)
 {
-    if (!output.isInit || !IsSameShaped(&input, &output)) {
+    if (!output.isInit || !IsSameShaped(input, output)) {
         InitTensor(&output, &input);
     }

@@ -148,7 +148,7 @@ b = a * scale + shift
 */
 void ScaleAndShift(const XTensor & a, XTensor & b, DTYPE scale, DTYPE shift)
 {
-    if (!b.isInit || !IsSameShaped(&a, &b)) {
+    if (!b.isInit || !IsSameShaped(a, b)) {
         InitTensor(&b, &a);
     }

@@ -78,7 +78,7 @@ void _funcName(const XTensor * a, XTensor * b)
         _cudaFuncName(a, b); \
         return; \
     } \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same type!"); \
     if (a->dataType == X_INT) { \
         int * d = (int*)a->data; \

@@ -109,7 +109,7 @@ void _funcName(const XTensor * a, XTensor * b)
     if (a->devID >= 0) { \
         ShowNTErrors("No GPU devices support!") \
     } \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same type!"); \
     if (a->dataType == X_INT) { \
         int * d = (int*)a->data; \

@@ -161,7 +161,7 @@ XTensor funcName(const XTensor & a)
 #define SIMPLE_UNARY_FUNCTION_VOID(funcName, _funcName, operationId) \
 void funcName(const XTensor & a, XTensor & b) \
 { \
-    if (!b.isInit || !IsSameShaped(&a, &b)) { \
+    if (!b.isInit || !IsSameShaped(a, b)) { \
         InitTensor(&b, &a); \
     } \
     _funcName(&a, &b); \

@@ -155,7 +155,7 @@ void Kernel##funcName(T * a, T * b, int size) \
 } \
 void _Cuda##funcName(const XTensor * a, XTensor * b) \
 { \
-    CheckNTErrors((IsSameShaped(a, b)), \
+    CheckNTErrors((_IsSameShaped(a, b)), \
                   "Input tensors should have the same type!"); \
     CheckNTErrors(a->isSparse == false, "TODO!"); \
     \

@@ -39,7 +39,7 @@ in the k-th grid
 */
 void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev)
 {
-    CheckNTErrors((IsSameShaped(s, t)), "Unmatched tensors!");
+    CheckNTErrors((_IsSameShaped(s, t)), "Unmatched tensors!");
     int blockDimRDI = s->order - blockDim - 1;
     int blockSize = 1;

@@ -52,7 +52,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
     CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
     CheckNTErrors((input->order > dim && dim >=0), "Illegal dimension to reduce!");
     CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
-    CheckNTErrors((shift == NULL || IsSameShaped(output, shift)), "Incorrect shift tensor size!");
+    CheckNTErrors((shift == NULL || _IsSameShaped(output, shift)), "Incorrect shift tensor size!");
     int dimRDI = input->order - dim - 1;
     CheckNTErrors(dimRDI >= 0, "Wrong dimension!");
@@ -45,7 +45,7 @@ void _Concatenate(const TensorList * smalls, XTensor * big, int dim)
         XTensor * a = (XTensor*)smalls->GetItem(i - 1);
         XTensor * b = (XTensor*)smalls->GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }

@@ -77,7 +77,7 @@ XTensor Concatenate(const TensorList &smalls, int dim)
         XTensor * a = (XTensor*)smalls.GetItem(i - 1);
         XTensor * b = (XTensor*)smalls.GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }
     XTensor * tensor = (XTensor*)smalls.GetItem(0);

@@ -190,7 +190,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim)
         XTensor * a = (XTensor*)smalls.GetItem(i - 1);
         XTensor * b = (XTensor*)smalls.GetItem(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }

@@ -291,7 +291,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
         XTensor * a = (XTensor*)smalls.Get(i - 1);
         XTensor * b = (XTensor*)smalls.Get(i);
         CheckNTErrors((a && b), "Empty input tensors!");
-        if (!IsSameShaped(a, b))
+        if (!_IsSameShaped(a, b))
             uniform = false;
     }
     XTensor * tensor = (XTensor*)smalls.Get(0);
@@ -30,7 +30,7 @@ check whether the two matrices are in the same type and size
 >> b - anther tensor to compare with
 << return - whether the two input tensors are identical
 */
-bool IsSameShaped(const XTensor * a, const XTensor * b)
+bool _IsSameShaped(const XTensor * a, const XTensor * b)
 {
     if(a == NULL || b == NULL)
         return false;

@@ -56,15 +56,38 @@ bool IsSameShaped(const XTensor * a, const XTensor * b)
 }
 
+/*
+check whether the two matrices are in the same type and size
+>> a - input tensor
+>> b - anther tensor to compare with
+<< return - whether the two input tensors are identical
+*/
+bool IsSameShaped(const XTensor & a, const XTensor & b)
+{
+    return _IsSameShaped(&a, &b);
+}
+
 /*
 check whether the three matrices are in the same type and size
 >> a - input tensor
 >> b - anther tensor to compare with
 >> c - a tensor again
 << return - whether the two input tensors are identical
 */
-bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
+bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c)
 {
-    return IsSameShaped(a, b) && IsSameShaped(a, c);
+    return _IsSameShaped(a, b) && _IsSameShaped(a, c);
 }
+
+/*
+check whether the three matrices are in the same type and size
+>> a - input tensor
+>> b - anther tensor to compare with
+>> c - a tensor again
+<< return - whether the two input tensors are identical
+*/
+bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c)
+{
+    return _IsSameShaped(&a, &b, &c);
+}
 
 } // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
@@ -27,10 +27,16 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
 /* judge whether the two matrices are in the same type and size */
-bool IsSameShaped(const XTensor * a, const XTensor * b);
+bool _IsSameShaped(const XTensor * a, const XTensor * b);
+
+/* judge whether the two matrices are in the same type and size */
+bool IsSameShaped(const XTensor & a, const XTensor & b);
+
+/* judge whether the three matrices are in the same type and size */
+bool _IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
 
 /* judge whether the three matrices are in the same type and size */
-bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
+bool IsSameShaped(const XTensor & a, const XTensor & b, const XTensor & c);
 
 } // namespace nts(NiuTrans.Tensor)
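A brief usage sketch of the updated interface (the helper function and the include path are hypothetical; the behavior follows the definitions above, where only the pointer-based `_IsSameShaped` tolerates NULL arguments):

```cpp
#include "core/shape/IsSameShaped.h" // assumed location of the header above

using namespace nts;

/* hypothetical helper: compares a tensor held by reference with one held
   by pointer, using whichever overload matches what the caller has */
bool CheckPair(const XTensor &a, const XTensor *b)
{
    /* pointer-based overload: safe even if b is NULL */
    if (!_IsSameShaped(&a, b))
        return false;

    /* reference-based overload: call sites drop the address-of operator */
    return IsSameShaped(a, *b);
}
```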
@@ -434,7 +434,7 @@ merge two tensors into a big tensor (return an XTensor structure)
 */
 XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
 {
-    CheckNTErrors(IsSameShaped(&smallA, &smallB),
+    CheckNTErrors(IsSameShaped(smallA, smallB),
                   "The two tensors must be of the same size!");
     int order = smallA.order;

@@ -53,7 +53,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize)
 void Reshape(XTensor &s, XTensor &t, int order, int * dimSize)
 {
-    if (!t.isInit || !IsSameShaped(&t, &s)) {
+    if (!t.isInit || !IsSameShaped(t, s)) {
         InitTensor(&t, &s);
     }

@@ -38,7 +38,7 @@ void _Squeeze(XTensor * source, XTensor * target, int leadingDim)
 {
     int order = target->order;
-    CheckNTErrors(IsSameShaped(source, target),
+    CheckNTErrors(_IsSameShaped(source, target),
                   "The source and target tensor must be of the same size!");
     CheckNTErrors(leadingDim >= -1 && leadingDim < order,
                   "Wrong leading dimension");

@@ -131,7 +131,7 @@ XTensor Squeeze(XTensor & source, int leadingDim)
 void Squeeze(XTensor & source, XTensor & target, int leadingDim)
 {
-    if (!target.isInit || !IsSameShaped(&source, &target)) {
+    if (!target.isInit || !IsSameShaped(source, target)) {
         InitTensor(&target, &source);
     }

@@ -41,7 +41,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
 {
     dim = MODX(dim, a->order);
-    CheckNTErrors((IsSameShaped(a, b)), "Input tensors should have the same type!");
+    CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
     CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!");
     CheckNTErrors((a->order == index->order), "Unmatched input tensors!");
     CheckNTErrors((index->dataType == X_INT), "Wrong data type!");

@@ -37,7 +37,7 @@ y = 1 if x > 1
 */
 void _HardTanH(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
 #ifdef USE_CUDA

@@ -88,7 +88,7 @@ XTensor HardTanH(const XTensor &x)
 void HardTanH(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -34,7 +34,7 @@ identity function y = x
 */
 void _Identity(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
     _CopyValues(x, y);
 }

@@ -64,7 +64,7 @@ XTensor Identity(const XTensor &x)
 void Identity(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }
@@ -211,7 +211,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim)
     if (ld < 0)
         ld = x.order - 1;
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -353,7 +353,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;

@@ -407,7 +407,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;

@@ -431,7 +431,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
                     dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
     }
     else {
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for (int k = 0; k < blockNum; k++) {
             GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);

@@ -49,7 +49,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
     DTYPE error = 0.0F;
     if (output->devID < 0) {
         CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
-        CheckNTErrors((IsSameShaped(gold, output)), "The input tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, output)), "The input tensors must be of the same size!");
         CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
         CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
         CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE), "TODO!");

@@ -206,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output,
                               int leadDim, int gBeg, int gLen, int oBeg)
 {
     CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
-    CheckNTErrors(IsSameShaped(gold, output), "The input tensors must be of the same size!");
+    CheckNTErrors(_IsSameShaped(gold, output), "The input tensors must be of the same size!");
     CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
     CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
     CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");

@@ -402,7 +402,7 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
     if (y->devID < 0) {
         CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
-        CheckNTErrors(IsSameShaped(t, y)&& IsSameShaped(dedy, y),
+        CheckNTErrors(_IsSameShaped(t, y)&& _IsSameShaped(dedy, y),
                       "The input tensors must be of the same size!");
         CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID),
                       "Tensor must be on the same device!");

@@ -55,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
                        bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg)
 {
     CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!");
-    CheckNTErrors((IsSameShaped(gold, y)), "The input tensors must be of the same size!");
+    CheckNTErrors((_IsSameShaped(gold, y)), "The input tensors must be of the same size!");
     CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!");
     CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
     CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!");

@@ -332,7 +332,7 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
                        int leadDim, int tBeg, int tLen, int yBeg)
 {
     CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
-    CheckNTErrors((IsSameShaped(t, y)&& IsSameShaped(dedy, y)),
+    CheckNTErrors((_IsSameShaped(t, y)&& _IsSameShaped(dedy, y)),
                   "The input tensors must be of the same size!");
     CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)),
                   "Tensor must be on the same device!");
@@ -33,7 +33,7 @@ rectify function y = max(0, x)
 */
 void _Rectify(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
 #ifdef USE_CUDA

@@ -80,7 +80,7 @@ XTensor Rectify(const XTensor &x)
 void Rectify(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -35,7 +35,7 @@ sigmoid function y = 1/(1+exp(-x))
 */
 void _Sigmoid(const XTensor * x, XTensor * y)
 {
-    CheckNTErrors(IsSameShaped(x, y),
+    CheckNTErrors(_IsSameShaped(x, y),
                   "The input tensor and output tensor must have the same shape!")
 #ifdef USE_CUDA

@@ -83,7 +83,7 @@ XTensor Sigmoid(const XTensor &x)
 void Sigmoid(const XTensor &x, XTensor &y)
 {
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -157,7 +157,7 @@ void Softmax(const XTensor &x, XTensor &y, int leadDim)
     if (ld < 0)
         ld = x.order - 1;
-    if (!y.isInit || !IsSameShaped(&y, &x)) {
+    if (!y.isInit || !IsSameShaped(y, x)) {
         InitTensor(&y, &x);
     }

@@ -253,7 +253,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else{
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for(int k = 0; k < blockNum; k++){
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;

@@ -292,7 +292,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         }
     }
     else{
-        CheckNTErrors((IsSameShaped(gold, y)), "The tensors must be of the same size!");
+        CheckNTErrors((_IsSameShaped(gold, y)), "The tensors must be of the same size!");
         for(int k = 0; k < blockNum; k++){
             gp = (DTYPE*)gold->data + k * blockSize;
             op = (DTYPE*)y->data + k * blockSize;

@@ -224,7 +224,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s
 {
     CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs.");
     CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU.");
-    CheckNTErrors((IsSameShaped(x, y)), "Input tensors must be of the same size!");
+    CheckNTErrors((_IsSameShaped(x, y)), "Input tensors must be of the same size!");
     int leadDimRDI = y->order - leadDim - 1;
     int dimensionSize = y->dimSizeRDI[leadDimRDI];
@@ -55,10 +55,10 @@ void _CrossEntropy(const XTensor * output, const XTensor * gold,
     int unitNum = output->dimSize[n];
     CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
-    CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
+    CheckNTErrors(padding == NULL || _IsSameShaped(padding, loss),
                   "The loss tensor and padding tensor must be same shape!");
     CheckNTErrors(loss->order == output->order - 1, "Wrong loss dimension!");
     CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");

@@ -102,11 +102,11 @@ void _CrossEntropyFast(const XTensor * output, const XTensor * gold,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leading dimension!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");
-    CheckNTErrors(padding == NULL || IsSameShaped(padding, loss),
+    CheckNTErrors(padding == NULL || _IsSameShaped(padding, loss),
                   "The loss tensor and padding tensor must be same shape!");
     CheckNTErrors(loss->order == output->order - 1,
                   "Wrong loss dimension!");

@@ -338,7 +338,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold,
     int unitNum = output->dimSize[n];
     CheckNTErrors(n >= 0 && n < output->order, "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == unitNum, "Wrong weight tensor!");
     CheckNTErrors(padding == NULL || padding->order == output->order - 1,

@@ -413,7 +413,7 @@ DTYPE _CrossEntropyFast(const XTensor * output, const XTensor * gold,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");

@@ -565,7 +565,7 @@ void _CrossEntropyBackward(XTensor * dedy, const XTensor * output,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leading dimension!");
-    CheckNTErrors(IsSameShaped(dedy, output, gold),
+    CheckNTErrors(_IsSameShaped(dedy, output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");

@@ -101,7 +101,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
     CheckNTErrors(n >= 0 && n < output->order,
                   "Wrong leadingDim!");
-    CheckNTErrors(IsSameShaped(output, gold),
+    CheckNTErrors(_IsSameShaped(output, gold),
                   "The output tensor and gold tensor must be of the same size!");
     CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
                   "Wrong weight tensor!");