Commit 003def3d by xuchen

1. Update test cases  2. Bug fixed in MatrixMulBatched function  3. Bug fixed in SumByColumnVT function

parent f12ced64
......@@ -89,9 +89,9 @@ void MatrixMulBatched(XTensor * a, MATRIX_TRANS_TYPE transposedA,
void * ap = (char*)a->data + aRealBlockSize * p;
void * bp = (char*)b->data + bRealBlockSize * p;
void * cp = (char*)c->data + cRealBlockSize * p;
XTensor * ai = new XTensor(2, aDimSize, a->dataType, a->denseRatio, a->mem);
XTensor * bi = new XTensor(2, bDimSize, b->dataType, b->denseRatio, b->mem);
XTensor * ci = new XTensor(2, cDimSize, c->dataType, c->denseRatio, c->mem);
XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, a->devID, b->mem);
XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, a->devID, c->mem);
ai->data = ap;
bi->data = bp;
ci->data = cp;
......
......@@ -52,7 +52,7 @@ void KernelADDByColumnVT(DTYPE * a, DTYPE * b, DTYPE * c, int colNum, int rowNum
DTYPE * bp = b + (rowNum * k + row) * colNum;
if (colNum % 4 == 0) {
for (int i = 0; i < colNum; i += 4)
sum += bp[i] + bp[i + 1] + b[i + 2] + b[i + 3];
sum += bp[i] + bp[i + 1] + bp[i + 2] + bp[i + 3];
}
else if (colNum % 2 == 0) {
for (int i = 0; i < colNum; i += 2)
......@@ -91,7 +91,7 @@ void CudaSumByColumnVT(XTensor * a, XTensor * b, XTensor * c, DTYPE beta)
"Illegal input vector size!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&
c->dataType == DEFAULT_DTYPE), "TODO");
int rowNum = b->dimSize[0];
int colNum = b->dimSize[1];
int blockNum = 1;
......@@ -105,7 +105,7 @@ void CudaSumByColumnVT(XTensor * a, XTensor * b, XTensor * c, DTYPE beta)
int devIDBackup = 0;
ProtectCudaDev(a->devID, devIDBackup);
KernelADDByColumnVT << <dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >> >
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, colNum, rowNum, blockNum, beta);
......
......@@ -483,9 +483,9 @@ bool TestConcatenate4()
delete sGPU1;
delete sGPU2;
delete tGPU;
delete[] sDimSize1;
delete[] sDimSize2;
delete[] tDimSize;
//delete[] sDimSize1;
//delete[] sDimSize2;
//delete[] tDimSize;
return cpuTest && gpuTest;
#else
......
......@@ -20,11 +20,12 @@
*/
#include "TCopyIndexed.h"
#include "../xc/Mycode.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
case 1 copy indexed sub-tensors
case 1: copy indexed sub-tensors
In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
srcIndex = [0, 2], tgtIndex = [0, 1], copyNum = 1.
*/
......@@ -88,7 +89,214 @@ bool TestCopyIndexed1()
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
sGPU->SetData(sData, sUnitNum);
tGPU->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
}
/*
case 2: copy indexed sub-tensors
In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 2,
srcIndex = [0, 2], tgtIndex = [1, 0], copyNum = 1.
*/
bool TestCopyIndexed2()
{
    /* a source tensor of size (3, 2, 3) */
    int sOrder = 3;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 3;
    sDimSize[1] = 2;
    sDimSize[2] = 3;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    /* a target tensor of size (3, 2, 2) */
    int tOrder = 3;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 3;
    tDimSize[1] = 2;
    tDimSize[2] = 2;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F},
                               {2.0F, 1.0F, 3.0F} },
                             { {1.0F, 2.0F, 4.0F},
                               {3.0F, 1.0F, 2.0F} },
                             { {-1.0F, 3.0F, 2.0F},
                               {1.0F, -1.0F, 0.0F} } };
    /* expected result: slice 0 of dim 2 goes to target index 1,
       slice 2 of dim 2 goes to target index 0 (srcIndex = [0, 2],
       tgtIndex = [1, 0], copyNum = 1) */
    DTYPE answer[3][2][2] = { { {2.0F, 0.0F},
                                {3.0F, 2.0F} },
                              { {4.0F, 1.0F},
                                {2.0F, 3.0F} },
                              { {2.0F, -1.0F},
                                {0.0F, 1.0F} } };

    int dim = 2;
    int indexSize = 2;
    int srcIndex[2] = {0, 2};
    int tgtIndex[2] = {1, 0};
    int copyNum = 1;

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);

    /* initialize variables */
    s->SetData(sData, sUnitNum);
    t->SetZeroAll();

    /* call CopyIndexed function */
    CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);

    /* check results */
    cpuTest = t->CheckData(answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    /* bug fix: the target tensor must be built with tOrder, not sOrder
       (they are both 3 here, so the old code worked by accident) */
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    sGPU->SetData(sData, sUnitNum);
    tGPU->SetZeroAll();

    /* call CopyIndexed function */
    CopyIndexed(sGPU, tGPU, dim, srcIndex, indexSize, tgtIndex, copyNum);

    /* check results */
    gpuTest = tGPU->CheckData(answer, tUnitNum);

    /* destroy variables */
    delete s;
    delete t;
    delete sGPU;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest;
#endif // USE_CUDA
}
/*
case 3: copy indexed sub-tensors
In this case, (3, 2, 3) -> (3, 2, 2), dim = 2, indexSize = 1,
srcIndex = [0], tgtIndex = [0], copyNum = 2.
*/
bool TestCopyIndexed3()
{
/* a input tensor of size (3, 2, 3) */
int sOrder = 3;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
sDimSize[1] = 2;
sDimSize[2] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
/* a output tensor of size (3, 2, 2) */
int tOrder = 3;
int * tDimSize = new int[tOrder];
tDimSize[0] = 3;
tDimSize[1] = 2;
tDimSize[2] = 2;
int tUnitNum = 1;
for (int i = 0; i < tOrder; i++)
tUnitNum *= tDimSize[i];
DTYPE sData[3][2][3] = { { {0.0F, -1.0F, 2.0F},
{2.0F, 1.0F, 3.0F} },
{ {1.0F, 2.0F, 4.0F},
{3.0F, 1.0F, 2.0F}},
{ {-1.0F, 3.0F, 2.0F},
{1.0F, -1.0F, 0.0F} } };
DTYPE answer[3][2][2] = { { {0.0F, -1.0F},
{2.0F, 1.0F} },
{ {1.0F, 2.0F},
{3.0F, 1.0F}},
{ {-1.0F, 3.0F},
{1.0F, -1.0F} } };
int dim = 2;
int indexSize = 1;
int srcIndex[1] = {0};
int tgtIndex[1] = {0};
int copyNum = 2;
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
XTensor * t = NewTensor(tOrder, tDimSize);
/* initialize variables */
s->SetData(sData, sUnitNum);
t->SetZeroAll();
/* call CopyIndexed function */
CopyIndexed(s, t, dim, srcIndex, indexSize, tgtIndex, copyNum);
/* check results */
cpuTest = t->CheckData(answer, tUnitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
......@@ -146,6 +354,24 @@ bool TestCopyIndexed()
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestCopyIndexed2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* case 3 test */
caseFlag = TestCopyIndexed3();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 3 failed!\n");
}
else
XPRINT(0, stdout, ">> case 3 passed!\n");
/* other cases test */
/*
......
......@@ -30,15 +30,15 @@ Identity function: y = x
*/
bool TestIdentity1()
{
/* a input tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
/* a tensor of size (2, 3) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 2;
dimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
{0.5F, 0.7F, 1.4F} };
......@@ -49,47 +49,50 @@ bool TestIdentity1()
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
x->SetData(xData, unitNum);
y->SetZeroAll();
/* call Identity function */
Identity(x, y);
/* check result */
cpuTest = y->CheckData(answer, sUnitNum);
cpuTest = y->CheckData(answer, unitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
/* call Identity function */
Identity(xGPU, yGPU);
/* check result */
gpuTest = yGPU->CheckData(answer, sUnitNum);
gpuTest = yGPU->CheckData(answer, unitNum);
/* destroy variables */
delete x, y;
delete xGPU, yGPU;
delete[] sDimSize;
delete x;
delete y;
delete xGPU;
delete yGPU;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x, y;
delete[] sDimSize;
delete x;
delete y;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -98,35 +101,39 @@ bool TestIdentity1()
/*
case 2: test IdentityBackward function.
IdentityBackward function: dE/dx = dE/dy * dy/dx = dE/dy
In this case, lossName=CROSSENTROPY.
*/
bool TestIdentity2()
{
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 1;
sDimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
DTYPE xData[1][3] = { {1.0F, 1.0F, 2.0F} };
DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
/* a tensor of size (2, 3) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 1;
dimSize[1] = 3;
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[3] = {1.0F, 1.0F, 2.0F};
DTYPE gData[3] = {0.0F, 0.0F, 1.0F};
DTYPE yAnswer[3] = {1.0F, 1.0F, 2.0F};
DTYPE dedyAnswer[3] = {0.0F, 0.0F, -0.5F};
DTYPE dedxAnswer[3] = {0.0F, 0.0F, -0.5F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * g = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
x->SetData(xData, unitNum);
g->SetData(gData, unitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
......@@ -138,22 +145,24 @@ bool TestIdentity2()
IdentityBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 1e-4F);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
&& dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
xGPU->SetData(xData, unitNum);
gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
......@@ -165,7 +174,9 @@ bool TestIdentity2()
IdentityBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 1e-4F);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
&& dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
&& dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
......@@ -178,7 +189,7 @@ bool TestIdentity2()
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
......@@ -188,7 +199,7 @@ bool TestIdentity2()
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......
......@@ -20,16 +20,15 @@
*/
#include "../core/math/ScaleAndShift.h"
#include "../function/Loss.h"
#include "TLoss.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
case 1: test LossCompute function
case 1: test LossCompute function.
In this case, Loss function name = SQUAREDERROR.
loss = sum_{i} 0.5*(t_i - y_i)^2,
where t_i is the gold standard and y_i is the model output
where t_i is the gold standard and y_i is the model output.
*/
bool TestLoss1()
{
......@@ -103,10 +102,10 @@ bool TestLoss1()
}
/*
case 2: test LossCompute function
case 2: test LossCompute function.
In this case, Loss function name = CROSSENTROPY.
loss = sum_{i} (-t_i * log(y_i))
where t_i is the gold standard and y_i is the model output
where t_i is the gold standard and y_i is the model output.
*/
bool TestLoss2()
{
......@@ -180,10 +179,10 @@ bool TestLoss2()
}
/*
case 3: test LossCompute function
case 3: test LossCompute function.
In this case, Loss function name = ONEHOTERROR.
loss = sum_{i} e_i
where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise
where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise.
*/
bool TestLoss3()
{
......
......@@ -19,6 +19,7 @@
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-15
*/
#include "../XTensor.h"
#include "TMatrixMulBatched.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
......@@ -105,7 +106,7 @@ bool TestMatrixMulBatched1()
/* check results */
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s1;
delete s2;
......
......@@ -29,25 +29,15 @@ In this case, y = max(0, x)
*/
bool TestRectify1()
{
/* a x tensor of size (2, 3) */
int xOrder = 2;
int * xDimSize = new int[xOrder];
xDimSize[0] = 2;
xDimSize[1] = 3;
int xUnitNum = 1;
for (int i = 0; i < xOrder; i++)
xUnitNum *= xDimSize[i];
/* a y tensor of size (2, 3) */
int yOrder = 2;
int * yDimSize = new int[yOrder];
yDimSize[0] = 2;
yDimSize[1] = 3;
int yUnitNum = 1;
for (int i = 0; i < yOrder; i++)
yUnitNum *= yDimSize[i];
/* a tensor of size (2, 3) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 2;
dimSize[1] = 3;
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, -1.0F, 2.0F},
{3.0F, -4.0F, -5.0F} };
......@@ -58,52 +48,50 @@ bool TestRectify1()
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(xOrder, xDimSize);
XTensor * y = NewTensor(yOrder, yDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, xUnitNum);
x->SetData(xData, unitNum);
y->SetZeroAll();
/* call Rectify function */
Rectify(x, y);
/* check results */
cpuTest = y->CheckData(answer, yUnitNum);
cpuTest = y->CheckData(answer, unitNum);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensor */
XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(yOrder, yDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* Initialize variables */
xGPU->SetData(xData, xUnitNum);
xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
/* call Rectify function */
Rectify(xGPU, yGPU);
/* check results */
gpuTest = yGPU->CheckData(answer, yUnitNum);
gpuTest = yGPU->CheckData(answer, unitNum);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
delete[] xDimSize;
delete[] yDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete[] xDimSize;
delete[] yDimSize;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -117,73 +105,83 @@ In this case, lossName=CROSSENTROPY.
*/
bool TestRectify2()
{
/* a x tensor of size (2, 3) */
int xOrder = 2;
int * xDimSize = new int[xOrder];
xDimSize[0] = 2;
xDimSize[1] = 3;
/* a tensor of size (2, 3) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 2;
dimSize[1] = 3;
int xUnitNum = 1;
for (int i = 0; i < xOrder; i++)
xUnitNum *= xDimSize[i];
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[2][3] = { {1.0F, 1.0F, 2.0F},
{2.0F, 4.0F, 5.0F} };
DTYPE yData[2][3] = { {1.0F, 1.0F, 2.0F},
{2.0F, 4.0F, 5.0F} };
DTYPE goldData[2][3] = { {1.0F, 1.0F, 1.0F},
{1.0F, 1.0F, 1.0F} };
DTYPE dedyData[2][3] = { {-1.0F, -1.0F, -0.5F},
{-0.5F, -0.25F, -0.2F} };
DTYPE answer[2][3] = { {-1.0F, -1.0F, -0.5F},
{-0.5F, -0.25F, -0.2F} };
DTYPE yAnswer[2][3] = { {1.0F, 1.0F, 2.0F},
{2.0F, 4.0F, 5.0F} };
DTYPE dedyAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
{-0.5F, -0.25F, -0.2F} };
DTYPE dedxAnswer[2][3] = { {-1.0F, -1.0F, -0.5F},
{-0.5F, -0.25F, -0.2F} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(xOrder, xDimSize);
XTensor * y = NewTensor(xOrder, xDimSize);
XTensor * gold = NewTensor(xOrder, xDimSize);
XTensor * dedy = NewTensor(xOrder, xDimSize);
XTensor * dedx = NewTensor(xOrder, xDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * gold = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, xUnitNum);
y->SetData(yData, xUnitNum);
gold->SetData(goldData, xUnitNum);
dedy->SetData(dedyData, xUnitNum);
x->SetData(xData, unitNum);
gold->SetData(goldData, unitNum);
y->SetZeroAll();
dedy->SetZeroAll();
dedx->SetZeroAll();
/* call Rectify function */
Rectify(x, y);
/* call RectifyBackward function */
RectifyBackward(gold, y, x, dedy, dedx, NOLOSS);
RectifyBackward(gold, y, x, dedy, dedx, CROSSENTROPY);
/* check results */
cpuTest = dedx->CheckData(answer, xUnitNum);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
&& dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(xOrder, xDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, xUnitNum);
yGPU->SetData(yData, xUnitNum);
goldGPU->SetData(goldData, xUnitNum);
dedyGPU->SetData(dedyData, xUnitNum);
xGPU->SetData(xData, unitNum);
goldGPU->SetData(goldData, unitNum);
yGPU->SetZeroAll();
dedyGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Rectify function */
Rectify(xGPU, yGPU);
/* call rectifybackward function */
RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
RectifyBackward(goldGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check results */
gpuTest = dedxGPU->CheckData(answer, xUnitNum);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
&& dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
&& dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
......@@ -196,7 +194,7 @@ bool TestRectify2()
delete dedyGPU;
delete dedxGPU;
delete goldGPU;
delete[] xDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
......@@ -206,7 +204,7 @@ bool TestRectify2()
delete dedy;
delete dedx;
delete gold;
delete[] xDimSize;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -220,7 +218,7 @@ TODO!!
/* test for Rectify Function */
bool TestRectify()
{
XPRINT(0, stdout, "[TEST RECTIFY] test rectify and its backward computation \n");
XPRINT(0, stdout, "[TEST RECTIFY] rectify function and its backward computation \n");
bool returnFlag = true, caseFlag = true;
/* case 1 test */
......
......@@ -23,8 +23,7 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: set the cell to the ascending order along a given dimension.
*/
/* case 1: set the cell to the ascending order along a given dimension. */
bool TestSetAscendingOrder1()
{
/* a input tensor of size (2, 4) */
......@@ -50,7 +49,6 @@ bool TestSetAscendingOrder1()
s->SetZeroAll();
/* call SetAscendingOrder function */
s->SetAscendingOrder(1);
/* check results */
......
......@@ -23,7 +23,10 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: set the cell to the ascending order along a given dimension. */
/*
case 1: test SetDataRand function.
set the tensor items by a uniform distribution in range [lower, upper].
*/
bool TestSetData1()
{
/* a input tensor of size (2, 4) */
......@@ -44,7 +47,7 @@ bool TestSetData1()
/* create tensors */
XTensor * s = NewTensor(sOrder, sDimSize);
/* call SetData function */
/* call SetDataRand function */
s->SetDataRand(0.0, 1.0);
/* check results */
......
......@@ -25,102 +25,71 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
case 1: test Sigmoid function and SigmoidBackward function.
case 1: test Sigmoid function.
sigmoid function: y = 1/(1+exp(-x))
backward computation: dE/ds = dE/dy * dy/dx
*/
bool TestSigmoid1()
{
/* a input tensor of size (3) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
int order = 1;
int * dimSize = new int[order];
dimSize[0] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
DTYPE answer[3] = {0.5F, 0.7311F, 0.8808F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
dedy->SetData(dedyData, sUnitNum);
x->SetData(xData, unitNum);
y->SetZeroAll();
dedx->SetZeroAll();
/* call Sigmoid function */
Sigmoid(x, y);
/* call SigmoidBackward function */
SigmoidBackward(g, y, x, dedy, dedx, NOLOSS);
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum, 0.001F) && dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
cpuTest = y->CheckData(answer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
dedyGPU->SetData(dedyData, sUnitNum);
xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Sigmoid function */
Sigmoid(xGPU, yGPU);
/* call SigmoidBackward function */
SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NOLOSS);
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum, 0.001F) && dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete xGPU;
delete yGPU;
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x;
delete y;
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -129,70 +98,72 @@ bool TestSigmoid1()
/*
case 2: test Sigmoid function and SigmoidBackward function.
sigmoid function: y = 1/(1+exp(-x))
backward computation: dE/ds = dE/dy * dy/dx
backward computation:
dE/ds = dE/dy * dy/dx
dy/dx = y * (1 -y)
In this case, LossName=CROSSENTROPY.
*/
bool TestSigmoid2()
{
/* a input tensor of size (3) */
int sOrder = 1;
int * sDimSize = new int[sOrder];
sDimSize[0] = 3;
int order = 1;
int * dimSize = new int[order];
dimSize[0] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
DTYPE dedyData[3] = {-0.8F, -1.094F, -1.135F};
DTYPE yAnswer[3] = {0.5F, 0.731F, 0.881F};
DTYPE dedxAnswer[3] = {-0.2F, -0.215F, -0.119F};
DTYPE yAnswer[3] = {0.5F, 0.7311F, 0.8808F};
DTYPE dedyAnswer[3] = {-0.8F, -1.0943F, -1.1353F};
DTYPE dedxAnswer[3] = {-0.2F, -0.2151F, -0.1192F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * g = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
dedy->SetZeroAll();
x->SetData(xData, unitNum);
g->SetData(gData, unitNum);
y->SetZeroAll();
dedy->SetZeroAll();
dedx->SetZeroAll();
/* call Sigmoid function */
Sigmoid(x, y);
/* initialize variables */
dedy->SetData(dedyData, sUnitNum);
/* call SigmoidBackward function */
SigmoidBackward(g, y, x, dedy, dedx, CROSSENTROPY);
/* check result */
cpuTest = y->CheckData(yAnswer, sUnitNum) && dedx->CheckData(dedxAnswer, sUnitNum);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
&& dedy->CheckData(dedyAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
dedyGPU->SetZeroAll();
xGPU->SetData(xData, unitNum);
gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
dedyGPU->SetZeroAll();
dedxGPU->SetZeroAll();
/* call Sigmoid function */
......@@ -202,8 +173,9 @@ bool TestSigmoid2()
SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
/* check result */
gpuTest = yGPU->CheckData(yAnswer, sUnitNum) && dedxGPU->CheckData(dedxAnswer, sUnitNum);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
&& dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
&& dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
delete y;
......@@ -215,7 +187,7 @@ bool TestSigmoid2()
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
......@@ -225,7 +197,7 @@ bool TestSigmoid2()
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -251,6 +223,16 @@ bool TestSigmoid()
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestSigmoid2();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 2 failed!\n");
}
else
XPRINT(0, stdout, ">> case 2 passed!\n");
/* other cases test */
/*
......
......@@ -31,68 +31,69 @@ softmax function: y = e^x / \sum_{i} e^{x_i}
*/
bool TestSoftmax1()
{
/* a input tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 2;
sDimSize[1] = 3;
/* a tensor of size (2, 3) */
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 2;
dimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F},
{0.5F, 0.7F, 1.4F} };
DTYPE answer[2][3] = { {0.09003057F, 0.24472848F, 0.66524094F},
{0.21362929F, 0.2609274F , 0.52544326F} };
DTYPE answer[2][3] = { {0.0900F, 0.2447F, 0.6652F},
{0.2136F, 0.2609F, 0.5254F} };
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
x->SetData(xData, unitNum);
y->SetZeroAll();
/* call Softmax function */
Softmax(x, y, 1);
/* check result */
cpuTest = y->CheckData(answer, sUnitNum);
cpuTest = y->CheckData(answer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
xGPU->SetData(xData, unitNum);
yGPU->SetZeroAll();
/* call Softmax function */
Softmax(xGPU, yGPU, 1);
/* check result */
gpuTest = yGPU->CheckData(answer, sUnitNum, 0.001F);
gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F);
/* destroy variables */
delete x;
delete y;
delete xGPU;
delete yGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete x, y;
delete[] sDimSize;
delete x;
delete y;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -101,62 +102,66 @@ bool TestSoftmax1()
/*
case 2: test SoftmaxBackward function.
SoftmaxBackward function: dE/dx_j = -gold_j + y_j
In this case, LossName=CROSSENTROPY.
*/
bool TestSoftmax2()
{
/* a input tensor of size (2, 3) */
int sOrder = 2;
int * sDimSize = new int[sOrder];
sDimSize[0] = 1;
sDimSize[1] = 3;
int order = 2;
int * dimSize = new int[order];
dimSize[0] = 1;
dimSize[1] = 3;
int sUnitNum = 1;
for (int i = 0; i < sOrder; i++)
sUnitNum *= sDimSize[i];
int unitNum = 1;
for (int i = 0; i < order; i++)
unitNum *= dimSize[i];
DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
DTYPE dedxAnswer[3] = {0.090031F, 0.244728F, -0.334759F};
DTYPE yAnswer[1][3] = { {0.0900F, 0.2447F, 0.6652F} };
DTYPE dedxAnswer[1][3] = {0.0900F, 0.2447F, -0.3347F};
/* CPU test */
bool cpuTest = true;
/* create tensors */
XTensor * x = NewTensor(sOrder, sDimSize);
XTensor * y = NewTensor(sOrder, sDimSize);
XTensor * g = NewTensor(sOrder, sDimSize);
XTensor * dedy = NewTensor(sOrder, sDimSize);
XTensor * dedx = NewTensor(sOrder, sDimSize);
XTensor * x = NewTensor(order, dimSize);
XTensor * y = NewTensor(order, dimSize);
XTensor * g = NewTensor(order, dimSize);
XTensor * dedy = NewTensor(order, dimSize);
XTensor * dedx = NewTensor(order, dimSize);
/* initialize variables */
x->SetData(xData, sUnitNum);
g->SetData(gData, sUnitNum);
x->SetData(xData, unitNum);
g->SetData(gData, unitNum);
y->SetZeroAll();
dedx->SetZeroAll();
dedy->SetZeroAll();
/* call Softmax function */
Softmax(x, y, 1);
/* call SoftmaxBackward function */
SoftmaxBackward(g, y, x, dedy, dedx, 1, CROSSENTROPY);
/* check result */
cpuTest = dedx->CheckData(dedxAnswer, sUnitNum, 0.001F);
cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
&& dedx->CheckData(dedxAnswer, unitNum, 1e-4F);
#ifdef USE_CUDA
/* GPU test */
bool gpuTest = true;
/* create tensors */
XTensor * xGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
/* initialize variables */
xGPU->SetData(xData, sUnitNum);
gGPU->SetData(gData, sUnitNum);
xGPU->SetData(xData, unitNum);
gGPU->SetData(gData, unitNum);
yGPU->SetZeroAll();
dedxGPU->SetZeroAll();
dedyGPU->SetZeroAll();
......@@ -168,7 +173,8 @@ bool TestSoftmax2()
SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, 1, CROSSENTROPY);
/* check result */
gpuTest = dedxGPU->CheckData(dedxAnswer, sUnitNum, 0.001F);
gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
&& dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F);
/* destroy variables */
delete x;
......@@ -181,7 +187,7 @@ bool TestSoftmax2()
delete gGPU;
delete dedxGPU;
delete dedyGPU;
delete[] sDimSize;
delete[] dimSize;
return cpuTest && gpuTest;
#else
......@@ -191,7 +197,7 @@ bool TestSoftmax2()
delete g;
delete dedx;
delete dedy;
delete[] sDimSize;
delete[] dimSize;
return cpuTest;
#endif // USE_CUDA
......
......@@ -181,14 +181,20 @@ bool TestSplit2()
gpuTest = tGPU->CheckData(answer, tUnitNum);
/* destroy variables */
delete s, t, sGPU, tGPU;
delete[] sDimSize, tDimSize;
delete s;
delete t;
delete sGPU;
delete tGPU;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t;
delete[] sDimSize, tDimSize;
delete s;
delete t;
delete[] sDimSize;
delete[] tDimSize;
return cpuTest;
#endif // USE_CUDA
......@@ -295,14 +301,25 @@ bool TestSplit3()
gpuTest = tGPU1->CheckData(answer1, tUnitNum1) && tGPU2->CheckData(answer2, tUnitNum2);
/* destroy variables */
delete s, t1, t2, sGPU, tGPU1, tGPU2;
delete[] sDimSize, tDimSize1, tDimSize2;
delete s;
delete t1;
delete t2;
delete sGPU;
delete tGPU1;
delete tGPU2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest && gpuTest;
#else
/* destroy variables */
delete s, t1, t2;
delete[] sDimSize, tDimSize1, tDimSize2;
delete s;
delete t1;
delete t2;
delete[] sDimSize;
delete[] tDimSize1;
delete[] tDimSize2;
return cpuTest;
#endif // USE_CUDA
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论