Commit 7ae1562d by xiaotong

replace XTensor::IsIdentical with XTensor::IsSameShaped

parent 37b7e09b
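
The new name states what the predicate actually checks: tensor metadata (order, per-dimension sizes, data type), not tensor contents. As a reading aid, here is a minimal sketch of such a check, inferred from the doc comment "judge whether the two matrices are in the same type and size" and the members (order, dimSize, dataType) visible in the hunks below; the real body of XTensor::IsSameShaped is elided from this diff and may check more (e.g. sparsity):

// Hedged sketch, not the verbatim XTensor.cpp implementation.
bool IsSameShapedSketch(const XTensor * a, const XTensor * b)
{
    if (a == NULL || b == NULL)              // NULL input: not comparable
        return false;
    if (a->order != b->order)                // same number of dimensions
        return false;
    for (int i = 0; i < a->order; i++) {     // same size along every dimension
        if (a->dimSize[i] != b->dimSize[i])
            return false;
    }
    return a->dataType == b->dataType;       // same element type
}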
@@ -96,7 +96,7 @@ void XMathGrad::GradMultiply(XTensor * node)
XNoder::MakeGrad(a);
XNoder::MakeGrad(b);
- CheckNTErrors(XTensor::IsIdentical(a, b), "Wrong sized input tensors!");
+ CheckNTErrors(XTensor::IsSameShaped(a, b), "Wrong sized input tensors!");
_Multiply(node->grad, b, a->grad, 1.0F);
_Multiply(node->grad, a, b->grad, 1.0F);
}
......
@@ -164,7 +164,7 @@ void XShapeGrad::GradMergeList(XTensor * node)
smallsGrad.Add(tail->grad);
if(i > 1){
- CheckNTErrors(XTensor::IsIdentical(last, tail),
+ CheckNTErrors(XTensor::IsSameShaped(last, tail),
"Input tensors must be of the same size!");
}
......
@@ -29,7 +29,7 @@ void XNoder::MakeGrad(XTensor * node)
if(node == NULL)
return;
- if(!XTensor::IsIdentical(node, node->grad)){
+ if(!XTensor::IsSameShaped(node, node->grad)){
delete node->grad;
node->grad = NewTensor(node);
node->grad->SetZeroAll();
......
@@ -370,7 +370,7 @@ judge whether the two matrices are in the same type and size
>> b - anther tensor to compare with
<< return - whether the two input tensors are identical
*/
- bool XTensor::IsIdentical(const XTensor * a, const XTensor * b)
+ bool XTensor::IsSameShaped(const XTensor * a, const XTensor * b)
{
if(a == NULL || b == NULL)
return false;
@@ -402,9 +402,9 @@ judge whether the three matrices are in the same type and size
>> c - a tensor again
<< return - whether the two input tensors are identical
*/
- bool XTensor::IsIdentical(XTensor * a, XTensor * b, XTensor * c)
+ bool XTensor::IsSameShaped(XTensor * a, XTensor * b, XTensor * c)
{
- return IsIdentical(a, b) && IsIdentical(a, c);
+ return IsSameShaped(a, b) && IsSameShaped(a, c);
}
/*
......
@@ -207,11 +207,11 @@ public:
/* judge whether the two matrices are in the same type and size */
static
- bool IsIdentical(const XTensor * a, const XTensor * b);
+ bool IsSameShaped(const XTensor * a, const XTensor * b);
/* judge whether the three matrices are in the same type and size */
static
- bool IsIdentical(XTensor * a, XTensor * b, XTensor * c);
+ bool IsSameShaped(XTensor * a, XTensor * b, XTensor * c);
/* set the size of each dimension */
void SetDim(int * myDimSize);
......
@@ -42,7 +42,7 @@ void _Absolute(const XTensor * a, XTensor * b)
}
#endif
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
DTYPE * d = (DTYPE*)a->data;
DTYPE * db = (DTYPE*)b->data;
......
@@ -63,7 +63,7 @@ set each entry to its absolute value
extern "C"
void _CudaAbsolute(const XTensor * a, XTensor * b)
{
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
......
@@ -55,9 +55,9 @@ void _MatrixMULBatchedCPU(const XList * a, MATRIX_TRANS_TYPE transposedA,
XTensor * ai = (XTensor*)a->GetItem(i);
XTensor * bi = (XTensor*)b->GetItem(i);
XTensor * ci = (XTensor*)c->GetItem(i);
- if (!XTensor::IsIdentical(aim, ai) ||
- !XTensor::IsIdentical(bim, bi) ||
- !XTensor::IsIdentical(cim, ci))
+ if (!XTensor::IsSameShaped(aim, ai) ||
+ !XTensor::IsSameShaped(bim, bi) ||
+ !XTensor::IsSameShaped(cim, ci))
{
isUniform = false;
break;
......
@@ -41,7 +41,7 @@ void _Negate(const XTensor * a, XTensor * b)
}
#endif
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
DTYPE * d = (DTYPE*)a->data;
DTYPE * db = (DTYPE*)b->data;
......
@@ -71,7 +71,7 @@ set each entry to its negtive value
extern "C"
void _CudaNegate(const XTensor * a, XTensor * b)
{
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
......
@@ -41,7 +41,7 @@ void _Sign(const XTensor * a, XTensor * b)
}
#endif
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
DTYPE * d = (DTYPE*)a->data;
DTYPE * db = (DTYPE*)b->data;
......
@@ -69,7 +69,7 @@ set each entry to its sign value
extern "C"
void _CudaSign(const XTensor * a, XTensor * b)
{
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
......
@@ -40,7 +40,7 @@ where b is a vector.
void _SumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
{
CheckNTErrors((a && b && c), "Empty input tensors!");
- CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+ CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((b->order == 2 && b->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
......
@@ -67,7 +67,7 @@ where b is a vector.
void _CudaSumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
{
CheckNTErrors((a && b && c), "Empty input tensors!");
- CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+ CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((b->order == 2 && b->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&
......
@@ -40,7 +40,7 @@ where c and a are vectors, and b_col is a column in b.
void _SumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
{
CheckNTErrors((a && b && c), "Empty input tensors!");
- CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+ CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((a->order == 2 && a->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
......
@@ -83,7 +83,7 @@ where c and a are vectors, and b_col is a column in b.
void _CudaSumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
{
CheckNTErrors((a && b && c), "Empty input tensors!");
- CheckNTErrors((XTensor::IsIdentical(a, c)), "Unmatched tensors in addition!");
+ CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((a->order == 2 && a->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&
......
@@ -225,9 +225,9 @@ void _CudaBLASMatrixMULList(cublasHandle_t * handle,
XTensor * ai = (XTensor*)a->GetItem(i);
XTensor * bi = (XTensor*)b->GetItem(i);
XTensor * ci = (XTensor*)c->GetItem(i);
- if (!XTensor::IsIdentical(aim, ai) ||
- !XTensor::IsIdentical(bim, bi) ||
- !XTensor::IsIdentical(cim, ci))
+ if (!XTensor::IsSameShaped(aim, ai) ||
+ !XTensor::IsSameShaped(bim, bi) ||
+ !XTensor::IsSameShaped(cim, ci))
{
isUniform = false;
break;
......
@@ -42,7 +42,7 @@ void _Log(const XTensor * a, XTensor * b)
}
#endif
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
DTYPE * d = (DTYPE*)a->data;
DTYPE * db = (DTYPE*)b->data;
......
@@ -63,7 +63,7 @@ set each entry to its log value
extern "C"
void _CudaLog(const XTensor * a, XTensor * b)
{
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
int gridSize[3];
......
@@ -45,9 +45,9 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
void _Normalize(const XTensor * input, XTensor * output, int dim, const XTensor * mean, const XTensor * var, const XTensor * a, const XTensor * b, DTYPE epsilon)
{
int dimRDI = input->order - dim - 1;
- CheckNTErrors((XTensor::IsIdentical(input, output)), "Unmatched input tensors!");
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Unmatched input tensors");
- CheckNTErrors((XTensor::IsIdentical(mean, var)), "Unmatched input tensors");
+ CheckNTErrors((XTensor::IsSameShaped(input, output)), "Unmatched input tensors!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Unmatched input tensors");
+ CheckNTErrors((XTensor::IsSameShaped(mean, var)), "Unmatched input tensors");
CheckNTErrors((input && output && mean && var && a && b), "Empty input tensors!");
CheckNTErrors((dimRDI >= 0 && dimRDI < input->order), "Incorrect reduction dimension!");
CheckNTErrors((dimRDI == a->order - 1), "Incorrect reduction dimension!");
......
@@ -103,7 +103,7 @@ void KernelPower(__half * a, __half * b, __half p, int size)
extern "C"
void _CudaPower(const XTensor * a, XTensor * b, DTYPE p)
{
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
int gridSize[3];
int blockSize[3];
......
@@ -38,7 +38,7 @@ in the k-th grid
*/
void _CopyInGrid(const XTensor * s, XTensor * t, int * index, int blockDim, int blockNumInGrid, bool isIndexOnDev)
{
- CheckNTErrors((XTensor::IsIdentical(s, t)), "Unmatched tensors!");
+ CheckNTErrors((XTensor::IsSameShaped(s, t)), "Unmatched tensors!");
int blockDimRDI = s->order - blockDim - 1;
int blockSize = 1;
......
@@ -48,7 +48,7 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
CheckNTErrors((input->order > dim && dim >=0), "Illegal dimension to reduce!");
CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
- CheckNTErrors((shift == NULL || XTensor::IsIdentical(output, shift)), "Incorrect shift tensor size!");
+ CheckNTErrors((shift == NULL || XTensor::IsSameShaped(output, shift)), "Incorrect shift tensor size!");
int dimRDI = input->order - dim - 1;
for(int i = 0; i < input->order; i++){
......
@@ -44,7 +44,7 @@ void _Concatenate(const XList * smalls, XTensor * big, int dim)
XTensor * a = (XTensor*)smalls->GetItem(i - 1);
XTensor * b = (XTensor*)smalls->GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!");
- if (!XTensor::IsIdentical(a, b))
+ if (!XTensor::IsSameShaped(a, b))
uniform = false;
}
@@ -76,7 +76,7 @@ XTensor Concatenate(const XList &smalls, int dim)
XTensor * a = (XTensor*)smalls.GetItem(i - 1);
XTensor * b = (XTensor*)smalls.GetItem(i);
CheckNTErrors((a && b), "Empty input tensors!");
- if (!XTensor::IsIdentical(a, b))
+ if (!XTensor::IsSameShaped(a, b))
uniform = false;
}
XTensor * tensor = (XTensor*)smalls.GetItem(0);
@@ -177,7 +177,7 @@ XTensor Concatenate(const XTensor &smallA, const XTensor &smallB, int dim)
XTensor * a = (XTensor*)smalls.Get(i - 1);
XTensor * b = (XTensor*)smalls.Get(i);
CheckNTErrors((a && b), "Empty input tensors!");
- if (!XTensor::IsIdentical(a, b))
+ if (!XTensor::IsSameShaped(a, b))
uniform = false;
}
XTensor * tensor = (XTensor*)smalls.Get(0);
......
@@ -356,7 +356,7 @@ merge two tensors into a big tensor (return a XTensor structure)
*/
XTensor Merge(const XTensor &smallA, const XTensor &smallB, int whereToMerge)
{
- CheckNTErrors(XTensor::IsIdentical(&smallA, &smallB),
+ CheckNTErrors(XTensor::IsSameShaped(&smallA, &smallB),
"The two tensors must be of the same size!");
int order = smallA.order;
......
@@ -36,7 +36,7 @@ sort the tensor along a given dimension
*/
void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
{
- CheckNTErrors((XTensor::IsIdentical(a, b)), "Input tensors should have the same type!");
+ CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((dim >= 0 && dim < a->order), "Incorrect dimension specified!");
CheckNTErrors((a->order == index->order), "Unmatched input tensors!");
CheckNTErrors((index->dataType == X_INT), "Wrong data type!");
......
@@ -106,7 +106,7 @@ void _HardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName)
{
- CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
+ CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
"The tensors must be of the same size!");
#ifdef USE_CUDA
......
@@ -72,7 +72,7 @@ void _IdentityBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName)
{
- CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
+ CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
"The tensors must be of the same size!");
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE)
......
@@ -309,7 +309,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else {
- CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
@@ -363,7 +363,7 @@ void _LogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else {
- CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
......
@@ -409,7 +409,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
}
else {
- CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
......
@@ -48,7 +48,7 @@ DTYPE _LossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
DTYPE error = 0.0F;
if (output->devID < 0) {
CheckNTErrors((gLen >= 0 && gLen <= output->unitNum), "Illegal input length!");
- CheckNTErrors((XTensor::IsIdentical(gold, output)), "The input tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, output)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE),
@@ -206,7 +206,7 @@ DTYPE _LossComputeForLogScale(XTensor * gold, XTensor * output,
int leadDim, int gBeg, int gLen, int oBeg)
{
CheckNTErrors(gLen >= 0 && gLen <= output->unitNum, "Illegal input length!");
- CheckNTErrors(XTensor::IsIdentical(gold, output), "The input tensors must be of the same size!");
+ CheckNTErrors(XTensor::IsSameShaped(gold, output), "The input tensors must be of the same size!");
CheckNTErrors(gold->dimSizeRDI[0] == 1 && output->dimSizeRDI[0] == 1, "TODO!");
CheckNTErrors(gold->order > leadDim && leadDim >= 0, "Illegal leading dimension!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE, "TODO!");
@@ -402,9 +402,10 @@ void _LossBackward(XTensor * dedy, XTensor * t, XTensor * y,
if (y->devID < 0) {
CheckNTErrors(tLen <= y->unitNum, "Illegal input length!");
- CheckNTErrors(XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y),
- "The input tensors must be of the same size!");
- CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID), "Tensor must be on the same device!");
+ CheckNTErrors(XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y),
+ "The input tensors must be of the same size!");
+ CheckNTErrors((dedy->devID == t->devID) && (dedy->devID == y->devID),
+ "Tensor must be on the same device!");
CheckNTErrors(t->order > leadDim, "Illegal leading dimension!");
CheckNTErrors(t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE, "TODO!");
......
@@ -55,7 +55,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg)
{
CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!");
- CheckNTErrors((XTensor::IsIdentical(gold, y)), "The input tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The input tensors must be of the same size!");
CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!");
CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!");
CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE),
@@ -333,20 +333,21 @@ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
int leadDim, int tBeg, int tLen, int yBeg)
{
CheckNTErrors((tLen <= y->unitNum), "Illegal input length!");
- CheckNTErrors((XTensor::IsIdentical(t, y)&& XTensor::IsIdentical(dedy, y)),
- "The input tensors must be of the same size!");
- CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)), "Tensor must be on the same device!");
+ CheckNTErrors((XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y)),
+ "The input tensors must be of the same size!");
+ CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)),
+ "Tensor must be on the same device!");
CheckNTErrors((t->order > leadDim), "Illegal leading dimension!");
CheckNTErrors((t->dataType == DEFAULT_DTYPE &&
- y->dataType == DEFAULT_DTYPE &&
- dedy->dataType == DEFAULT_DTYPE),
- "Input vectors are not in default type.");
+ y->dataType == DEFAULT_DTYPE &&
+ dedy->dataType == DEFAULT_DTYPE),
+ "Input vectors are not in default type.");
CheckNTErrors((dedy->devID >= 0 && t->devID >= 0 && y->devID >= 0),
"The backward compuation must be performed on GPUs.");
"The backward compuation must be performed on GPUs.");
CheckNTErrors((dedy->devID == t->devID && dedy->devID == y->devID),
"The vectors must be on the same GPU.");
"The vectors must be on the same GPU.");
CheckNTErrors((tBeg == yBeg), "TODO!");
int leadDimRDI = leadDim >= 0 ? y->order - leadDim - 1 : -1;
......
@@ -103,7 +103,7 @@ void _RectifyBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName)
{
- CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
+ CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
"The tensors must be of the same size!");
#ifdef USE_CUDA
......
@@ -94,8 +94,8 @@ void _SigmoidBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName)
{
- CheckNTErrors((gold == NULL || XTensor::IsIdentical(gold, y)),
- "The tensors must be of the same size!");
+ CheckNTErrors((gold == NULL || XTensor::IsSameShaped(gold, y)),
+ "The tensors must be of the same size!");
#ifdef USE_CUDA
if(x->devID >= 0 || y->devID >= 0){
......
@@ -230,7 +230,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else{
- CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
for(int k = 0; k < blockNum; k++){
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
@@ -269,7 +269,7 @@ void _SoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
}
}
else{
- CheckNTErrors((XTensor::IsIdentical(gold, y)), "The tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
for(int k = 0; k < blockNum; k++){
gp = (DTYPE*)gold->data + k * blockSize;
op = (DTYPE*)y->data + k * blockSize;
......
@@ -167,7 +167,7 @@ void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * s
{
CheckNTErrors((x->devID >= 0), "Forward computation of softmax must be run on GPUs.");
CheckNTErrors((x->devID == y->devID), "Tensors used in softmax are not on the same GPU.");
- CheckNTErrors((XTensor::IsIdentical(x, y)), "Input tensors must be of the same size!");
+ CheckNTErrors((XTensor::IsSameShaped(x, y)), "Input tensors must be of the same size!");
int leadDimRDI = y->order - leadDim - 1;
int dimensionSize = y->dimSizeRDI[leadDimRDI];
......
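
Call-site migration is mechanical, as every hunk above shows; for example, the pattern from the GradMultiply hunk at the top:

// before
CheckNTErrors(XTensor::IsIdentical(a, b), "Wrong sized input tensors!");
// after
CheckNTErrors(XTensor::IsSameShaped(a, b), "Wrong sized input tensors!");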